ZTE's code, first commit

Change-Id: I9a04da59e459a9bc0d67f101f700d9d7dc8d681b
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/Makefile b/ap/os/linux/linux-3.4.x/arch/arm/kernel/Makefile
new file mode 100644
index 0000000..b7f54c6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/Makefile
@@ -0,0 +1,87 @@
+#
+# Makefile for the linux kernel.
+#
+
+CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
+AFLAGS_head.o        := -DTEXT_OFFSET=$(TEXT_OFFSET)
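+# TEXT_OFFSET (set in arch/arm/Makefile) is the offset of the kernel
+# image from the base of RAM; the linker script and head.S both need it.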
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_patch.o = -pg
+endif
+
+CFLAGS_REMOVE_return_address.o = -pg
+
+# Object file lists.
+
+obj-y		:= elf.o entry-armv.o entry-common.o irq.o opcodes.o \
+		   process.o ptrace.o return_address.o sched_clock.o \
+		   setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
+
+CFLAGS_process.o += $(DISABLE_LTO)
+obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o
+
+obj-$(CONFIG_LEDS)		+= leds.o
+obj-$(CONFIG_OC_ETM)		+= etm.o
+obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
+obj-$(CONFIG_ISA_DMA_API)	+= dma.o
+obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
+obj-$(CONFIG_MODULES)		+= armksyms.o module.o
+obj-$(CONFIG_ARTHUR)		+= arthur.o
+obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
+obj-$(CONFIG_PCI)		+= bios32.o isa.o
+obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o
+obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
+obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
+obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o insn.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o insn.o
+obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o insn.o patch.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-common.o patch.o
+ifdef CONFIG_THUMB2_KERNEL
+obj-$(CONFIG_KPROBES)		+= kprobes-thumb.o
+else
+obj-$(CONFIG_KPROBES)		+= kprobes-arm.o
+endif
+obj-$(CONFIG_ARM_KPROBES_TEST)	+= test-kprobes.o
+test-kprobes-objs		:= kprobes-test.o
+ifdef CONFIG_THUMB2_KERNEL
+test-kprobes-objs		+= kprobes-test-thumb.o
+else
+test-kprobes-objs		+= kprobes-test-arm.o
+endif
+obj-$(CONFIG_ATAGS_PROC)	+= atags.o
+obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
+obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o
+obj-$(CONFIG_KGDB)		+= kgdb.o
+obj-$(CONFIG_ARM_UNWIND)	+= unwind.o
+obj-$(CONFIG_HAVE_TCM)		+= tcm.o
+obj-$(CONFIG_OF)		+= devtree.o
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+obj-$(CONFIG_SWP_EMULATE)	+= swp_emulate.o
+CFLAGS_swp_emulate.o		:= -Wa,-march=armv7-a
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
+
+obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
+obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
+obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
+obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
+obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
+obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
+AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
+obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o
+
+ifneq ($(CONFIG_ARCH_EBSA110),y)
+  obj-y		+= io.o
+endif
+
+head-y			:= head$(MMUEXT).o
+obj-$(CONFIG_DEBUG_LL)	+= debug.o
+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+
+extra-y := $(head-y) init_task.o vmlinux.lds
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/armksyms.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/armksyms.c
new file mode 100644
index 0000000..b57c75e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/armksyms.c
@@ -0,0 +1,163 @@
+/*
+ *  linux/arch/arm/kernel/armksyms.c
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/cryptohash.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/checksum.h>
+#include <asm/ftrace.h>
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler...  (the prototypes are not correct, but that doesn't
+ * really matter since they're not versioned).
+ */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __do_div64(void);
+
+extern void __aeabi_idiv(void);
+extern void __aeabi_idivmod(void);
+extern void __aeabi_lasr(void);
+extern void __aeabi_llsl(void);
+extern void __aeabi_llsr(void);
+extern void __aeabi_lmul(void);
+extern void __aeabi_uidiv(void);
+extern void __aeabi_uidivmod(void);
+extern void __aeabi_ulcmp(void);
+
+extern void fpundefinstr(void);
+
+	/* platform dependent support */
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__const_udelay);
+
+	/* networking */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_ipv6_magic);
+
+	/* io */
+#ifndef __raw_readsb
+EXPORT_SYMBOL(__raw_readsb);
+#endif
+#ifndef __raw_readsw
+EXPORT_SYMBOL(__raw_readsw);
+#endif
+#ifndef __raw_readsl
+EXPORT_SYMBOL(__raw_readsl);
+#endif
+#ifndef __raw_writesb
+EXPORT_SYMBOL(__raw_writesb);
+#endif
+#ifndef __raw_writesw
+EXPORT_SYMBOL(__raw_writesw);
+#endif
+#ifndef __raw_writesl
+EXPORT_SYMBOL(__raw_writesl);
+#endif
+
+	/* string / mem functions */
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(__memzero);
+
+	/* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__clear_user);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+#endif
+
+	/* gcc lib functions */
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__do_div64);
+
+#ifdef CONFIG_AEABI
+EXPORT_SYMBOL(__aeabi_idiv);
+EXPORT_SYMBOL(__aeabi_idivmod);
+EXPORT_SYMBOL(__aeabi_lasr);
+EXPORT_SYMBOL(__aeabi_llsl);
+EXPORT_SYMBOL(__aeabi_llsr);
+EXPORT_SYMBOL(__aeabi_lmul);
+EXPORT_SYMBOL(__aeabi_uidiv);
+EXPORT_SYMBOL(__aeabi_uidivmod);
+EXPORT_SYMBOL(__aeabi_ulcmp);
+#endif
+
+	/* bitops */
+EXPORT_SYMBOL(_set_bit);
+EXPORT_SYMBOL(_test_and_set_bit);
+EXPORT_SYMBOL(_clear_bit);
+EXPORT_SYMBOL(_test_and_clear_bit);
+EXPORT_SYMBOL(_change_bit);
+EXPORT_SYMBOL(_test_and_change_bit);
+EXPORT_SYMBOL(_find_first_zero_bit_le);
+EXPORT_SYMBOL(_find_next_zero_bit_le);
+EXPORT_SYMBOL(_find_first_bit_le);
+EXPORT_SYMBOL(_find_next_bit_le);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(_find_first_zero_bit_be);
+EXPORT_SYMBOL(_find_next_zero_bit_be);
+EXPORT_SYMBOL(_find_first_bit_be);
+EXPORT_SYMBOL(_find_next_bit_be);
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_OLD_MCOUNT
+EXPORT_SYMBOL(mcount);
+#endif
+EXPORT_SYMBOL(__gnu_mcount_nc);
+#endif
+
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+EXPORT_SYMBOL(__pv_phys_offset);
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/arthur.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/arthur.c
new file mode 100644
index 0000000..321c529
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/arthur.c
@@ -0,0 +1,94 @@
+/*
+ *  linux/arch/arm/kernel/arthur.c
+ *
+ *  Copyright (C) 1998, 1999, 2000, 2001 Philip Blundell
+ *
+ * Arthur personality
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/personality.h>
+#include <linux/stddef.h>
+#include <linux/signal.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include <asm/ptrace.h>
+
+/* Arthur doesn't have many signals, and a lot of those that it does
+   have don't map easily to any Linux equivalent.  Never mind.  */
+
+#define ARTHUR_SIGABRT		1
+#define ARTHUR_SIGFPE		2
+#define ARTHUR_SIGILL		3
+#define ARTHUR_SIGINT		4
+#define ARTHUR_SIGSEGV		5
+#define ARTHUR_SIGTERM		6
+#define ARTHUR_SIGSTAK		7
+#define ARTHUR_SIGUSR1		8
+#define ARTHUR_SIGUSR2		9
+#define ARTHUR_SIGOSERROR	10
+
+static unsigned long arthur_to_linux_signals[32] = {
+	0,	1,	2,	3,	4,	5,	6,	7,
+	8,	9,	10,	11,	12,	13,	14,	15,
+	16,	17,	18,	19,	20,	21,	22,	23,
+	24,	25,	26,	27,	28,	29,	30,	31
+};
+
+static unsigned long linux_to_arthur_signals[32] = {
+	0,		-1,		ARTHUR_SIGINT,	-1,
+	ARTHUR_SIGILL,	5,		ARTHUR_SIGABRT,	7,
+	ARTHUR_SIGFPE,	9,		ARTHUR_SIGUSR1,	ARTHUR_SIGSEGV,
+	ARTHUR_SIGUSR2,	13,		14,		ARTHUR_SIGTERM,
+	16,		17,		18,		19,
+	20,		21,		22,		23,
+	24,		25,		26,		27,
+	28,		29,		30,		31
+};
+
+static void arthur_lcall7(int nr, struct pt_regs *regs)
+{
+	struct siginfo info;
+	info.si_signo = SIGSWI;
+	info.si_errno = nr;
+	/* Bounce it to the emulator */
+	send_sig_info(SIGSWI, &info, current);
+}
+
+static struct exec_domain arthur_exec_domain = {
+	.name		= "Arthur",
+	.handler	= arthur_lcall7,
+	.pers_low	= PER_RISCOS,
+	.pers_high	= PER_RISCOS,
+	.signal_map	= arthur_to_linux_signals,
+	.signal_invmap	= linux_to_arthur_signals,
+	.module		= THIS_MODULE,
+};
+
+/*
+ * We could do with some locking to stop Arthur being removed while
+ * processes are using it.
+ */
+
+static int __init arthur_init(void)
+{
+	return register_exec_domain(&arthur_exec_domain);
+}
+
+static void __exit arthur_exit(void)
+{
+	unregister_exec_domain(&arthur_exec_domain);
+}
+
+module_init(arthur_init);
+module_exit(arthur_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/asm-offsets.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/asm-offsets.c
new file mode 100644
index 0000000..1429d89
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/asm-offsets.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 1995-2003 Russell King
+ *               2001-2002 Keith Owens
+ *     
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
+#include <asm/mach/arch.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/procinfo.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <linux/kbuild.h>
+
+/*
+ * Make sure that the compiler and target are compatible.
+ */
+#if defined(__APCS_26__)
+#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
+#endif
+/*
+ * GCC 3.0, 3.1: general bad code generation.
+ * GCC 3.2.0: incorrect function argument offset calculation.
+ * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
+ *            (http://gcc.gnu.org/PR8896) and incorrect structure
+ *	      initialisation in fs/jffs2/erase.c
+ */
+#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#error Your compiler is too buggy; it is known to miscompile kernels.
+#error    Known good compilers: 3.3
+#endif
+
+int main(void)
+{
+  DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
+#ifdef CONFIG_CC_STACKPROTECTOR
+  DEFINE(TSK_STACK_CANARY,	offsetof(struct task_struct, stack_canary));
+#endif
+  BLANK();
+  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
+  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
+  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
+  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
+  DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
+  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
+  DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
+  DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
+  DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
+  DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
+  DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
+  DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
+#ifdef CONFIG_SMP
+  DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));
+#endif
+#ifdef CONFIG_ARM_THUMBEE
+  DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
+#endif
+#ifdef CONFIG_IWMMXT
+  DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
+#endif
+#ifdef CONFIG_CRUNCH
+  DEFINE(TI_CRUNCH_STATE,	offsetof(struct thread_info, crunchstate));
+#endif
+  BLANK();
+  DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
+  DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
+  DEFINE(S_R2,			offsetof(struct pt_regs, ARM_r2));
+  DEFINE(S_R3,			offsetof(struct pt_regs, ARM_r3));
+  DEFINE(S_R4,			offsetof(struct pt_regs, ARM_r4));
+  DEFINE(S_R5,			offsetof(struct pt_regs, ARM_r5));
+  DEFINE(S_R6,			offsetof(struct pt_regs, ARM_r6));
+  DEFINE(S_R7,			offsetof(struct pt_regs, ARM_r7));
+  DEFINE(S_R8,			offsetof(struct pt_regs, ARM_r8));
+  DEFINE(S_R9,			offsetof(struct pt_regs, ARM_r9));
+  DEFINE(S_R10,			offsetof(struct pt_regs, ARM_r10));
+  DEFINE(S_FP,			offsetof(struct pt_regs, ARM_fp));
+  DEFINE(S_IP,			offsetof(struct pt_regs, ARM_ip));
+  DEFINE(S_SP,			offsetof(struct pt_regs, ARM_sp));
+  DEFINE(S_LR,			offsetof(struct pt_regs, ARM_lr));
+  DEFINE(S_PC,			offsetof(struct pt_regs, ARM_pc));
+  DEFINE(S_PSR,			offsetof(struct pt_regs, ARM_cpsr));
+  DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
+  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
+  BLANK();
+#ifdef CONFIG_CACHE_L2X0
+  DEFINE(L2X0_R_PHY_BASE,	offsetof(struct l2x0_regs, phy_base));
+  DEFINE(L2X0_R_AUX_CTRL,	offsetof(struct l2x0_regs, aux_ctrl));
+  DEFINE(L2X0_R_TAG_LATENCY,	offsetof(struct l2x0_regs, tag_latency));
+  DEFINE(L2X0_R_DATA_LATENCY,	offsetof(struct l2x0_regs, data_latency));
+  DEFINE(L2X0_R_FILTER_START,	offsetof(struct l2x0_regs, filter_start));
+  DEFINE(L2X0_R_FILTER_END,	offsetof(struct l2x0_regs, filter_end));
+  DEFINE(L2X0_R_PREFETCH_CTRL,	offsetof(struct l2x0_regs, prefetch_ctrl));
+  DEFINE(L2X0_R_PWR_CTRL,	offsetof(struct l2x0_regs, pwr_ctrl));
+  BLANK();
+#endif
+#ifdef CONFIG_CPU_HAS_ASID
+  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
+  BLANK();
+#endif
+  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
+  DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
+  BLANK();
+  DEFINE(VM_EXEC,	       	VM_EXEC);
+  BLANK();
+  DEFINE(PAGE_SZ,	       	PAGE_SIZE);
+  BLANK();
+  DEFINE(SYS_ERROR0,		0x9f0000);
+  BLANK();
+  DEFINE(SIZEOF_MACHINE_DESC,	sizeof(struct machine_desc));
+  DEFINE(MACHINFO_TYPE,		offsetof(struct machine_desc, nr));
+  DEFINE(MACHINFO_NAME,		offsetof(struct machine_desc, name));
+  BLANK();
+  DEFINE(PROC_INFO_SZ,		sizeof(struct proc_info_list));
+  DEFINE(PROCINFO_INITFUNC,	offsetof(struct proc_info_list, __cpu_flush));
+  DEFINE(PROCINFO_MM_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
+  DEFINE(PROCINFO_IO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_io_mmu_flags));
+  BLANK();
+#ifdef MULTI_DABORT
+  DEFINE(PROCESSOR_DABT_FUNC,	offsetof(struct processor, _data_abort));
+#endif
+#ifdef MULTI_PABORT
+  DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
+#endif
+#ifdef MULTI_CPU
+  DEFINE(CPU_SLEEP_SIZE,	offsetof(struct processor, suspend_size));
+  DEFINE(CPU_DO_SUSPEND,	offsetof(struct processor, do_suspend));
+  DEFINE(CPU_DO_RESUME,		offsetof(struct processor, do_resume));
+#endif
+#ifdef MULTI_CACHE
+  DEFINE(CACHE_FLUSH_KERN_ALL,	offsetof(struct cpu_cache_fns, flush_kern_all));
+#endif
+  BLANK();
+  DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
+  DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
+  DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
+  return 0; 
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/atags.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/atags.c
new file mode 100644
index 0000000..42a1a14
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/atags.c
@@ -0,0 +1,87 @@
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <asm/setup.h>
+#include <asm/types.h>
+#include <asm/page.h>
+
+struct buffer {
+	size_t size;
+	char data[];
+};
+
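+/*
+ * Old-style procfs read callback: copy up to @count bytes of the saved
+ * ATAGs into @page, flagging EOF once @off reaches the end of the buffer.
+ */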
+static int
+read_buffer(char* page, char** start, off_t off, int count,
+	int* eof, void* data)
+{
+	struct buffer *buffer = (struct buffer *)data;
+
+	if (off >= buffer->size) {
+		*eof = 1;
+		return 0;
+	}
+
+	count = min((int) (buffer->size - off), count);
+
+	memcpy(page, &buffer->data[off], count);
+
+	return count;
+}
+
+#define BOOT_PARAMS_SIZE 1536
+static char __initdata atags_copy[BOOT_PARAMS_SIZE];
+
+void __init save_atags(const struct tag *tags)
+{
+	memcpy(atags_copy, tags, sizeof(atags_copy));
+}
+
+static int __init init_atags_procfs(void)
+{
+	/*
+	 * This cannot go into save_atags() because kmalloc and proc don't work
+	 * yet when it is called.
+	 */
+	struct proc_dir_entry *tags_entry;
+	struct tag *tag = (struct tag *)atags_copy;
+	struct buffer *b;
+	size_t size;
+
+	if (tag->hdr.tag != ATAG_CORE) {
+		printk(KERN_INFO "No ATAGs?\n");
+		return -EINVAL;
+	}
+
+	for (; tag->hdr.size; tag = tag_next(tag))
+		;
+
+	/* include the terminating ATAG_NONE */
+	size = (char *)tag - atags_copy + sizeof(struct tag_header);
+
+	WARN_ON(tag->hdr.tag != ATAG_NONE);
+
+	b = kmalloc(sizeof(*b) + size, GFP_KERNEL);
+	if (!b)
+		goto nomem;
+
+	b->size = size;
+	memcpy(b->data, atags_copy, size);
+
+	tags_entry = create_proc_read_entry("atags", 0400,
+			NULL, read_buffer, b);
+
+	if (!tags_entry)
+		goto nomem;
+
+	return 0;
+
+nomem:
+	kfree(b);
+	printk(KERN_ERR "Exporting ATAGs: not enough memory\n");
+
+	return -ENOMEM;
+}
+arch_initcall(init_atags_procfs);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/atags.h b/ap/os/linux/linux-3.4.x/arch/arm/kernel/atags.h
new file mode 100644
index 0000000..e5f028d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/atags.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_ATAGS_PROC
+extern void save_atags(struct tag *tags);
+#else
+static inline void save_atags(struct tag *tags) { }
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/bios32.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/bios32.c
new file mode 100644
index 0000000..ede5f77
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/bios32.c
@@ -0,0 +1,626 @@
+/*
+ *  linux/arch/arm/kernel/bios32.c
+ *
+ *  PCI bios-type initialisation for PCI machines
+ *
+ *  Bits taken from various places.
+ */
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/pci.h>
+
+static int debug_pci;
+
+/*
+ * We can't use pci_find_device() here since we are
+ * called from interrupt context.
+ */
+static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		u16 status;
+
+		/*
+		 * ignore host bridge - we handle
+		 * that separately
+		 */
+		if (dev->bus->number == 0 && dev->devfn == 0)
+			continue;
+
+		pci_read_config_word(dev, PCI_STATUS, &status);
+		if (status == 0xffff)
+			continue;
+
+		if ((status & status_mask) == 0)
+			continue;
+
+		/* clear the status errors */
+		pci_write_config_word(dev, PCI_STATUS, status & status_mask);
+
+		if (warn)
+			printk("(%s: %04X) ", pci_name(dev), status);
+	}
+
+	list_for_each_entry(dev, &bus->devices, bus_list)
+		if (dev->subordinate)
+			pcibios_bus_report_status(dev->subordinate, status_mask, warn);
+}
+
+void pcibios_report_status(u_int status_mask, int warn)
+{
+	struct list_head *l;
+
+	list_for_each(l, &pci_root_buses) {
+		struct pci_bus *bus = pci_bus_b(l);
+
+		pcibios_bus_report_status(bus, status_mask, warn);
+	}
+}
+
+/*
+ * We don't use this fixup to fix the device, but to initialise it.
+ * That's not what fixups are for, but it works.
+ * Note that the arbiter/ISA bridge appears to be buggy, specifically in
+ * the following area:
+ * 1. park on CPU
+ * 2. ISA bridge ping-pong
+ * 3. ISA bridge master handling of target RETRY
+ *
+ * Bug 3 is responsible for the sound DMA grinding to a halt.  We now
+ * live with bug 2.
+ */
+static void __devinit pci_fixup_83c553(struct pci_dev *dev)
+{
+	/*
+	 * Set memory region to start at address 0, and enable IO
+	 */
+	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY);
+	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO);
+
+	dev->resource[0].end -= dev->resource[0].start;
+	dev->resource[0].start = 0;
+
+	/*
+	 * All memory requests from ISA to be channelled to PCI
+	 */
+	pci_write_config_byte(dev, 0x48, 0xff);
+
+	/*
+	 * Enable ping-pong on bus master to ISA bridge transactions.
+	 * This improves the sound DMA substantially.  The fixed
+	 * priority arbiter also helps (see below).
+	 */
+	pci_write_config_byte(dev, 0x42, 0x01);
+
+	/*
+	 * Enable PCI retry
+	 */
+	pci_write_config_byte(dev, 0x40, 0x22);
+
+	/*
+	 * We used to set the arbiter to "park on last master" (bit
+	 * 1 set), but unfortunately the CyberPro does not park the
+	 * bus.  We must therefore park on CPU.  Unfortunately, this
+	 * may trigger yet another bug in the 553.
+	 */
+	pci_write_config_byte(dev, 0x83, 0x02);
+
+	/*
+	 * Make the ISA DMA request lowest priority, and disable
+	 * rotating priorities completely.
+	 */
+	pci_write_config_byte(dev, 0x80, 0x11);
+	pci_write_config_byte(dev, 0x81, 0x00);
+
+	/*
+	 * Route INTA input to IRQ 11, and set IRQ11 to be level
+	 * sensitive.
+	 */
+	pci_write_config_word(dev, 0x44, 0xb000);
+	outb(0x08, 0x4d1);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553);
+
+static void __devinit pci_fixup_unassign(struct pci_dev *dev)
+{
+	dev->resource[0].end -= dev->resource[0].start;
+	dev->resource[0].start = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, pci_fixup_unassign);
+
+/*
+ * Prevent the PCI layer from seeing the resources allocated to this device
+ * if it is the host bridge by marking it as such.  These resources are of
+ * no consequence to the PCI layer (they are handled elsewhere).
+ */
+static void __devinit pci_fixup_dec21285(struct pci_dev *dev)
+{
+	int i;
+
+	if (dev->devfn == 0) {
+		dev->class &= 0xff;
+		dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+			dev->resource[i].start = 0;
+			dev->resource[i].end   = 0;
+			dev->resource[i].flags = 0;
+		}
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);
+
+/*
+ * PCI IDE controllers use non-standard I/O port decoding, respect it.
+ */
+static void __devinit pci_fixup_ide_bases(struct pci_dev *dev)
+{
+	struct resource *r;
+	int i;
+
+	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+		return;
+
+	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+		r = dev->resource + i;
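+		/* A control block at 0x374/0x3f4 actually decodes the
+		 * alternate-status register two ports in (0x376/0x3f6). */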
+		if ((r->start & ~0x80) == 0x374) {
+			r->start |= 2;
+			r->end = r->start;
+		}
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
+
+/*
+ * Put the DEC21142 to sleep
+ */
+static void __devinit pci_fixup_dec21142(struct pci_dev *dev)
+{
+	pci_write_config_dword(dev, 0x40, 0x80000000);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_dec21142);
+
+/*
+ * The CY82C693 needs some rather major fixups to ensure that it does
+ * the right thing.  Idea from the Alpha people, with a few additions.
+ *
+ * We ensure that the IDE base registers are set to 1f0/3f4 for the
+ * primary bus, and 170/374 for the secondary bus.  Also, hide them
+ * from the PCI subsystem view as well so we won't try to perform
+ * our own auto-configuration on them.
+ *
+ * In addition, we ensure that the PCI IDE interrupts are routed to
+ * IRQ 14 and IRQ 15 respectively.
+ *
+ * The above gets us to a point where the IDE on this device is
+ * functional.  However, the CY82C693U _does not work_ in bus
+ * master mode without locking the PCI bus solid.
+ */
+static void __devinit pci_fixup_cy82c693(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+		u32 base0, base1;
+
+		if (dev->class & 0x80) {	/* primary */
+			base0 = 0x1f0;
+			base1 = 0x3f4;
+		} else {			/* secondary */
+			base0 = 0x170;
+			base1 = 0x374;
+		}
+
+		pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
+				       base0 | PCI_BASE_ADDRESS_SPACE_IO);
+		pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
+				       base1 | PCI_BASE_ADDRESS_SPACE_IO);
+
+		dev->resource[0].start = 0;
+		dev->resource[0].end   = 0;
+		dev->resource[0].flags = 0;
+
+		dev->resource[1].start = 0;
+		dev->resource[1].end   = 0;
+		dev->resource[1].flags = 0;
+	} else if (PCI_FUNC(dev->devfn) == 0) {
+		/*
+		 * Setup IDE IRQ routing.
+		 */
+		pci_write_config_byte(dev, 0x4b, 14);
+		pci_write_config_byte(dev, 0x4c, 15);
+
+		/*
+		 * Disable FREQACK handshake, enable USB.
+		 */
+		pci_write_config_byte(dev, 0x4d, 0x41);
+
+		/*
+		 * Enable PCI retry, and PCI post-write buffer.
+		 */
+		pci_write_config_byte(dev, 0x44, 0x17);
+
+		/*
+		 * Enable ISA master and DMA post write buffering.
+		 */
+		pci_write_config_byte(dev, 0x45, 0x03);
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);
+
+static void __init pci_fixup_it8152(struct pci_dev *dev)
+{
+	int i;
+	/* fixup for ITE 8152 devices */
+	/* FIXME: add defines for class 0x68000 and 0x80103 */
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST ||
+	    dev->class == 0x68000 ||
+	    dev->class == 0x80103) {
+		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+			dev->resource[i].start = 0;
+			dev->resource[i].end   = 0;
+			dev->resource[i].flags = 0;
+		}
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8152, pci_fixup_it8152);
+
+
+
+void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+	if (debug_pci)
+		printk("PCI: Assigning IRQ %02d to %s\n", irq, pci_name(dev));
+	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/*
+ * If the bus contains any of these devices, then we must not turn on
+ * parity checking of any kind.  Currently this is CyberPro 20x0 only.
+ */
+static inline int pdev_bad_for_parity(struct pci_dev *dev)
+{
+	return ((dev->vendor == PCI_VENDOR_ID_INTERG &&
+		 (dev->device == PCI_DEVICE_ID_INTERG_2000 ||
+		  dev->device == PCI_DEVICE_ID_INTERG_2010)) ||
+		(dev->vendor == PCI_VENDOR_ID_ITE &&
+		 dev->device == PCI_DEVICE_ID_ITE_8152));
+
+}
+
+/*
+ * pcibios_fixup_bus - Called after each bus is probed,
+ * but before its children are examined.
+ */
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+	u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;
+
+	/*
+	 * Walk the devices on this bus, working out what we can
+	 * and can't support.
+	 */
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		u16 status;
+
+		pci_read_config_word(dev, PCI_STATUS, &status);
+
+		/*
+		 * If any device on this bus does not support fast back
+		 * to back transfers, then the bus as a whole is not able
+		 * to support them.  Having fast back to back transfers
+		 * on saves us one PCI cycle per transaction.
+		 */
+		if (!(status & PCI_STATUS_FAST_BACK))
+			features &= ~PCI_COMMAND_FAST_BACK;
+
+		if (pdev_bad_for_parity(dev))
+			features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+
+		switch (dev->class >> 8) {
+		case PCI_CLASS_BRIDGE_PCI:
+			pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
+			status |= PCI_BRIDGE_CTL_PARITY|PCI_BRIDGE_CTL_MASTER_ABORT;
+			status &= ~(PCI_BRIDGE_CTL_BUS_RESET|PCI_BRIDGE_CTL_FAST_BACK);
+			pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
+			break;
+
+		case PCI_CLASS_BRIDGE_CARDBUS:
+			pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status);
+			status |= PCI_CB_BRIDGE_CTL_PARITY|PCI_CB_BRIDGE_CTL_MASTER_ABORT;
+			pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status);
+			break;
+		}
+	}
+
+	/*
+	 * Now walk the devices again, this time setting them up.
+	 */
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		u16 cmd;
+
+		pci_read_config_word(dev, PCI_COMMAND, &cmd);
+		cmd |= features;
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
+				      L1_CACHE_BYTES >> 2);
+	}
+
+	/*
+	 * Propagate the flags to the PCI bridge.
+	 */
+	if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+		if (features & PCI_COMMAND_FAST_BACK)
+			bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
+		if (features & PCI_COMMAND_PARITY)
+			bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
+	}
+
+	/*
+	 * Report what we did for this bus
+	 */
+	printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
+		bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
+}
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_fixup_bus);
+#endif
+
+/*
+ * Swizzle the device pin each time we cross a bridge.
+ * This may update *pin; it returns the slot number.
+ */
+static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin)
+{
+	struct pci_sys_data *sys = dev->sysdata;
+	int slot = 0, oldpin = *pin;
+
+	if (sys->swizzle)
+		slot = sys->swizzle(dev, pin);
+
+	if (debug_pci)
+		printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
+			pci_name(dev), oldpin, *pin, slot);
+
+	return slot;
+}
+
+/*
+ * Map a slot/pin to an IRQ.
+ */
+static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct pci_sys_data *sys = dev->sysdata;
+	int irq = -1;
+
+	if (sys->map_irq)
+		irq = sys->map_irq(dev, slot, pin);
+
+	if (debug_pci)
+		printk("PCI: %s mapping slot %d pin %d => irq %d\n",
+			pci_name(dev), slot, pin, irq);
+
+	return irq;
+}
+
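+/*
+ * Probe each controller: allocate its sysdata, run the platform setup
+ * hook and, if that succeeds, scan the bus behind it.  Bus numbers are
+ * handed out sequentially across controllers.
+ */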
+static void __init pcibios_init_hw(struct hw_pci *hw)
+{
+	struct pci_sys_data *sys = NULL;
+	int ret;
+	int nr, busnr;
+
+	for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
+		sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
+		if (!sys)
+			panic("PCI: unable to allocate sys data!");
+
+#ifdef CONFIG_PCI_DOMAINS
+		sys->domain  = hw->domain;
+#endif
+		sys->hw      = hw;
+		sys->busnr   = busnr;
+		sys->swizzle = hw->swizzle;
+		sys->map_irq = hw->map_irq;
+		INIT_LIST_HEAD(&sys->resources);
+
+		ret = hw->setup(nr, sys);
+
+		if (ret > 0) {
+			if (list_empty(&sys->resources)) {
+				pci_add_resource_offset(&sys->resources,
+					 &ioport_resource, sys->io_offset);
+				pci_add_resource_offset(&sys->resources,
+					 &iomem_resource, sys->mem_offset);
+			}
+
+			sys->bus = hw->scan(nr, sys);
+
+			if (!sys->bus)
+				panic("PCI: unable to scan bus!");
+
+			busnr = sys->bus->subordinate + 1;
+
+			list_add(&sys->node, &hw->buses);
+		} else {
+			kfree(sys);
+			if (ret < 0)
+				break;
+		}
+	}
+}
+
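+/*
+ * Main entry point for platform PCI setup: probe all controllers, fix
+ * up IRQ routing, size/assign/enable bridge resources (unless the
+ * firmware already did: PCI_PROBE_ONLY) and register the devices.
+ */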
+void __init pci_common_init(struct hw_pci *hw)
+{
+	struct pci_sys_data *sys;
+
+	INIT_LIST_HEAD(&hw->buses);
+
+	pci_add_flags(PCI_REASSIGN_ALL_RSRC);
+	if (hw->preinit)
+		hw->preinit();
+	pcibios_init_hw(hw);
+	if (hw->postinit)
+		hw->postinit();
+
+	pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
+
+	list_for_each_entry(sys, &hw->buses, node) {
+		struct pci_bus *bus = sys->bus;
+
+		if (!pci_has_flag(PCI_PROBE_ONLY)) {
+			/*
+			 * Size the bridge windows.
+			 */
+			pci_bus_size_bridges(bus);
+
+			/*
+			 * Assign resources.
+			 */
+			pci_bus_assign_resources(bus);
+
+			/*
+			 * Enable bridges
+			 */
+			pci_enable_bridges(bus);
+		}
+
+		/*
+		 * Tell drivers about devices found.
+		 */
+		pci_bus_add_devices(bus);
+	}
+}
+
+#ifndef CONFIG_PCI_HOST_ITE8152
+void pcibios_set_master(struct pci_dev *dev)
+{
+	/* No special bus mastering setup handling */
+}
+#endif
+
+char * __init pcibios_setup(char *str)
+{
+	if (!strcmp(str, "debug")) {
+		debug_pci = 1;
+		return NULL;
+	} else if (!strcmp(str, "firmware")) {
+		pci_add_flags(PCI_PROBE_ONLY);
+		return NULL;
+	}
+	return str;
+}
+
+/*
+ * From arch/i386/kernel/pci-i386.c:
+ *
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff..
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t align)
+{
+	resource_size_t start = res->start;
+
+	if (res->flags & IORESOURCE_IO && start & 0x300)
+		start = (start + 0x3ff) & ~0x3ff;
+
+	start = (start + align - 1) & ~(align - 1);
+
+	return start;
+}
+
+/**
+ * pcibios_enable_device - Enable I/O and memory.
+ * @dev: PCI device to be enabled
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	u16 cmd, old_cmd;
+	int idx;
+	struct resource *r;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	old_cmd = cmd;
+	for (idx = 0; idx < 6; idx++) {
+		/* Only set up the requested stuff */
+		if (!(mask & (1 << idx)))
+			continue;
+
+		r = dev->resource + idx;
+		if (!r->start && r->end) {
+			printk(KERN_ERR "PCI: Device %s not available because"
+			       " of resource collisions\n", pci_name(dev));
+			return -EINVAL;
+		}
+		if (r->flags & IORESOURCE_IO)
+			cmd |= PCI_COMMAND_IO;
+		if (r->flags & IORESOURCE_MEM)
+			cmd |= PCI_COMMAND_MEMORY;
+	}
+
+	/*
+	 * Bridges (eg, cardbus bridges) need to be fully enabled
+	 */
+	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+		cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+
+	if (cmd != old_cmd) {
+		printk("PCI: enabling device %s (%04x -> %04x)\n",
+		       pci_name(dev), old_cmd, cmd);
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
+	return 0;
+}
+
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state, int write_combine)
+{
+	struct pci_sys_data *root = dev->sysdata;
+	unsigned long phys;
+
+	if (mmap_state == pci_mmap_io) {
+		return -EINVAL;
+	} else {
+		phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
+	}
+
+	/*
+	 * Mark this as IO
+	 */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (remap_pfn_range(vma, vma->vm_start, phys,
+			     vma->vm_end - vma->vm_start,
+			     vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/calls.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/calls.S
new file mode 100755
index 0000000..ce558d2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/calls.S
@@ -0,0 +1,415 @@
+/*
+ *  linux/arch/arm/kernel/calls.S
+ *
+ *  Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This file is included thrice in entry-common.S
+ */
+/* 0 */		CALL(sys_restart_syscall)
+		CALL(sys_exit)
+		CALL(sys_fork_wrapper)
+		CALL(sys_read)
+		CALL(sys_write)
+/* 5 */		CALL(sys_open)
+		CALL(sys_close)
+		CALL(sys_ni_syscall)		/* was sys_waitpid */
+		CALL(sys_creat)
+		CALL(sys_link)
+/* 10 */	CALL(sys_unlink)
+		CALL(sys_execve_wrapper)
+		CALL(sys_chdir)
+		CALL(OBSOLETE(sys_time))	/* used by libc4 */
+		CALL(sys_mknod)
+/* 15 */	CALL(sys_chmod)
+		CALL(sys_lchown16)
+		CALL(sys_ni_syscall)		/* was sys_break */
+		CALL(sys_ni_syscall)		/* was sys_stat */
+		CALL(sys_lseek)
+/* 20 */	CALL(sys_getpid)
+		CALL(sys_mount)
+		CALL(OBSOLETE(sys_oldumount))	/* used by libc4 */
+		CALL(sys_setuid16)
+		CALL(sys_getuid16)
+/* 25 */	CALL(OBSOLETE(sys_stime))
+		CALL(sys_ptrace)
+		CALL(OBSOLETE(sys_alarm))	/* used by libc4 */
+		CALL(sys_ni_syscall)		/* was sys_fstat */
+		CALL(sys_pause)
+/* 30 */	CALL(OBSOLETE(sys_utime))	/* used by libc4 */
+		CALL(sys_ni_syscall)		/* was sys_stty */
+		CALL(sys_ni_syscall)		/* was sys_getty */
+		CALL(sys_access)
+		CALL(sys_nice)
+/* 35 */	CALL(sys_ni_syscall)		/* was sys_ftime */
+		CALL(sys_sync)
+		CALL(sys_kill)
+		CALL(sys_rename)
+		CALL(sys_mkdir)
+/* 40 */	CALL(sys_rmdir)
+		CALL(sys_dup)
+		CALL(sys_pipe)
+		CALL(sys_times)
+		CALL(sys_ni_syscall)		/* was sys_prof */
+/* 45 */	CALL(sys_brk)
+		CALL(sys_setgid16)
+		CALL(sys_getgid16)
+		CALL(sys_ni_syscall)		/* was sys_signal */
+		CALL(sys_geteuid16)
+/* 50 */	CALL(sys_getegid16)
+		CALL(sys_acct)
+		CALL(sys_umount)
+		CALL(sys_ni_syscall)		/* was sys_lock */
+		CALL(sys_ioctl)
+/* 55 */	CALL(sys_fcntl)
+		CALL(sys_ni_syscall)		/* was sys_mpx */
+		CALL(sys_setpgid)
+		CALL(sys_ni_syscall)		/* was sys_ulimit */
+		CALL(sys_ni_syscall)		/* was sys_olduname */
+/* 60 */	CALL(sys_umask)
+		CALL(sys_chroot)
+		CALL(sys_ustat)
+		CALL(sys_dup2)
+		CALL(sys_getppid)
+/* 65 */	CALL(sys_getpgrp)
+		CALL(sys_setsid)
+		CALL(sys_sigaction)
+		CALL(sys_ni_syscall)		/* was sys_sgetmask */
+		CALL(sys_ni_syscall)		/* was sys_ssetmask */
+/* 70 */	CALL(sys_setreuid16)
+		CALL(sys_setregid16)
+		CALL(sys_sigsuspend)
+		CALL(sys_sigpending)
+		CALL(sys_sethostname)
+/* 75 */	CALL(sys_setrlimit)
+		CALL(OBSOLETE(sys_old_getrlimit)) /* used by libc4 */
+		CALL(sys_getrusage)
+		CALL(sys_gettimeofday)
+		CALL(sys_settimeofday)
+/* 80 */	CALL(sys_getgroups16)
+		CALL(sys_setgroups16)
+		CALL(OBSOLETE(sys_old_select))	/* used by libc4 */
+		CALL(sys_symlink)
+		CALL(sys_ni_syscall)		/* was sys_lstat */
+/* 85 */	CALL(sys_readlink)
+		CALL(sys_uselib)
+		CALL(sys_swapon)
+		CALL(sys_reboot)
+		CALL(OBSOLETE(sys_old_readdir))	/* used by libc4 */
+/* 90 */	CALL(OBSOLETE(sys_old_mmap))	/* used by libc4 */
+		CALL(sys_munmap)
+		CALL(sys_truncate)
+		CALL(sys_ftruncate)
+		CALL(sys_fchmod)
+/* 95 */	CALL(sys_fchown16)
+		CALL(sys_getpriority)
+		CALL(sys_setpriority)
+		CALL(sys_ni_syscall)		/* was sys_profil */
+		CALL(sys_statfs)
+/* 100 */	CALL(sys_fstatfs)
+		CALL(sys_ni_syscall)		/* sys_ioperm */
+		CALL(OBSOLETE(ABI(sys_socketcall, sys_oabi_socketcall)))
+		CALL(sys_syslog)
+		CALL(sys_setitimer)
+/* 105 */	CALL(sys_getitimer)
+		CALL(sys_newstat)
+		CALL(sys_newlstat)
+		CALL(sys_newfstat)
+		CALL(sys_ni_syscall)		/* was sys_uname */
+/* 110 */	CALL(sys_ni_syscall)		/* was sys_iopl */
+		CALL(sys_vhangup)
+		CALL(sys_ni_syscall)
+		CALL(OBSOLETE(sys_syscall))	/* call a syscall */
+		CALL(sys_wait4)
+/* 115 */	CALL(sys_swapoff)
+		CALL(sys_sysinfo)
+		CALL(OBSOLETE(ABI(sys_ipc, sys_oabi_ipc)))
+		CALL(sys_fsync)
+		CALL(sys_sigreturn_wrapper)
+/* 120 */	CALL(sys_clone_wrapper)
+		CALL(sys_setdomainname)
+		CALL(sys_newuname)
+		CALL(sys_ni_syscall)		/* modify_ldt */
+		CALL(sys_adjtimex)
+/* 125 */	CALL(sys_mprotect)
+		CALL(sys_sigprocmask)
+		CALL(sys_ni_syscall)		/* was sys_create_module */
+		CALL(sys_init_module)
+		CALL(sys_delete_module)
+/* 130 */	CALL(sys_ni_syscall)		/* was sys_get_kernel_syms */
+		CALL(sys_quotactl)
+		CALL(sys_getpgid)
+		CALL(sys_fchdir)
+		CALL(sys_bdflush)
+/* 135 */	CALL(sys_sysfs)
+		CALL(sys_personality)
+		CALL(sys_ni_syscall)		/* reserved for afs_syscall */
+		CALL(sys_setfsuid16)
+		CALL(sys_setfsgid16)
+/* 140 */	CALL(sys_llseek)
+		CALL(sys_getdents)
+		CALL(sys_select)
+		CALL(sys_flock)
+		CALL(sys_msync)
+/* 145 */	CALL(sys_readv)
+		CALL(sys_writev)
+		CALL(sys_getsid)
+		CALL(sys_fdatasync)
+		CALL(sys_sysctl)
+/* 150 */	CALL(sys_mlock)
+		CALL(sys_munlock)
+		CALL(sys_mlockall)
+		CALL(sys_munlockall)
+		CALL(sys_sched_setparam)
+/* 155 */	CALL(sys_sched_getparam)
+		CALL(sys_sched_setscheduler)
+		CALL(sys_sched_getscheduler)
+		CALL(sys_sched_yield)
+		CALL(sys_sched_get_priority_max)
+/* 160 */	CALL(sys_sched_get_priority_min)
+		CALL(sys_sched_rr_get_interval)
+		CALL(sys_nanosleep)
+		CALL(sys_mremap)
+		CALL(sys_setresuid16)
+/* 165 */	CALL(sys_getresuid16)
+		CALL(sys_ni_syscall)		/* vm86 */
+		CALL(sys_ni_syscall)		/* was sys_query_module */
+		CALL(sys_poll)
+		CALL(sys_ni_syscall)		/* was nfsservctl */
+/* 170 */	CALL(sys_setresgid16)
+		CALL(sys_getresgid16)
+		CALL(sys_prctl)
+		CALL(sys_rt_sigreturn_wrapper)
+		CALL(sys_rt_sigaction)
+/* 175 */	CALL(sys_rt_sigprocmask)
+		CALL(sys_rt_sigpending)
+		CALL(sys_rt_sigtimedwait)
+		CALL(sys_rt_sigqueueinfo)
+		CALL(sys_rt_sigsuspend)
+/* 180 */	CALL(ABI(sys_pread64, sys_oabi_pread64))
+		CALL(ABI(sys_pwrite64, sys_oabi_pwrite64))
+		CALL(sys_chown16)
+		CALL(sys_getcwd)
+		CALL(sys_capget)
+/* 185 */	CALL(sys_capset)
+		CALL(sys_sigaltstack_wrapper)
+		CALL(sys_sendfile)
+		CALL(sys_ni_syscall)		/* getpmsg */
+		CALL(sys_ni_syscall)		/* putpmsg */
+/* 190 */	CALL(sys_vfork_wrapper)
+		CALL(sys_getrlimit)
+		CALL(sys_mmap2)
+		CALL(ABI(sys_truncate64, sys_oabi_truncate64))
+		CALL(ABI(sys_ftruncate64, sys_oabi_ftruncate64))
+/* 195 */	CALL(ABI(sys_stat64, sys_oabi_stat64))
+		CALL(ABI(sys_lstat64, sys_oabi_lstat64))
+		CALL(ABI(sys_fstat64, sys_oabi_fstat64))
+		CALL(sys_lchown)
+		CALL(sys_getuid)
+/* 200 */	CALL(sys_getgid)
+		CALL(sys_geteuid)
+		CALL(sys_getegid)
+		CALL(sys_setreuid)
+		CALL(sys_setregid)
+/* 205 */	CALL(sys_getgroups)
+		CALL(sys_setgroups)
+		CALL(sys_fchown)
+		CALL(sys_setresuid)
+		CALL(sys_getresuid)
+/* 210 */	CALL(sys_setresgid)
+		CALL(sys_getresgid)
+		CALL(sys_chown)
+		CALL(sys_setuid)
+		CALL(sys_setgid)
+/* 215 */	CALL(sys_setfsuid)
+		CALL(sys_setfsgid)
+		CALL(sys_getdents64)
+		CALL(sys_pivot_root)
+		CALL(sys_mincore)
+/* 220 */	CALL(sys_madvise)
+		CALL(ABI(sys_fcntl64, sys_oabi_fcntl64))
+		CALL(sys_ni_syscall) /* TUX */
+		CALL(sys_ni_syscall)
+		CALL(sys_gettid)
+/* 225 */	CALL(ABI(sys_readahead, sys_oabi_readahead))
+		CALL(sys_setxattr)
+		CALL(sys_lsetxattr)
+		CALL(sys_fsetxattr)
+		CALL(sys_getxattr)
+/* 230 */	CALL(sys_lgetxattr)
+		CALL(sys_fgetxattr)
+		CALL(sys_listxattr)
+		CALL(sys_llistxattr)
+		CALL(sys_flistxattr)
+/* 235 */	CALL(sys_removexattr)
+		CALL(sys_lremovexattr)
+		CALL(sys_fremovexattr)
+		CALL(sys_tkill)
+		CALL(sys_sendfile64)
+/* 240 */	CALL(sys_futex)
+		CALL(sys_sched_setaffinity)
+		CALL(sys_sched_getaffinity)
+		CALL(sys_io_setup)
+		CALL(sys_io_destroy)
+/* 245 */	CALL(sys_io_getevents)
+		CALL(sys_io_submit)
+		CALL(sys_io_cancel)
+		CALL(sys_exit_group)
+		CALL(sys_lookup_dcookie)
+/* 250 */	CALL(sys_epoll_create)
+		CALL(ABI(sys_epoll_ctl, sys_oabi_epoll_ctl))
+		CALL(ABI(sys_epoll_wait, sys_oabi_epoll_wait))
+		CALL(sys_remap_file_pages)
+		CALL(sys_ni_syscall)	/* sys_set_thread_area */
+/* 255 */	CALL(sys_ni_syscall)	/* sys_get_thread_area */
+		CALL(sys_set_tid_address)
+		CALL(sys_timer_create)
+		CALL(sys_timer_settime)
+		CALL(sys_timer_gettime)
+/* 260 */	CALL(sys_timer_getoverrun)
+		CALL(sys_timer_delete)
+		CALL(sys_clock_settime)
+		CALL(sys_clock_gettime)
+		CALL(sys_clock_getres)
+/* 265 */	CALL(sys_clock_nanosleep)
+		CALL(sys_statfs64_wrapper)
+		CALL(sys_fstatfs64_wrapper)
+		CALL(sys_tgkill)
+		CALL(sys_utimes)
+/* 270 */	CALL(sys_arm_fadvise64_64)
+		CALL(sys_pciconfig_iobase)
+		CALL(sys_pciconfig_read)
+		CALL(sys_pciconfig_write)
+		CALL(sys_mq_open)
+/* 275 */	CALL(sys_mq_unlink)
+		CALL(sys_mq_timedsend)
+		CALL(sys_mq_timedreceive)
+		CALL(sys_mq_notify)
+		CALL(sys_mq_getsetattr)
+/* 280 */	CALL(sys_waitid)
+		CALL(sys_socket)
+		CALL(ABI(sys_bind, sys_oabi_bind))
+		CALL(ABI(sys_connect, sys_oabi_connect))
+		CALL(sys_listen)
+/* 285 */	CALL(sys_accept)
+		CALL(sys_getsockname)
+		CALL(sys_getpeername)
+		CALL(sys_socketpair)
+		CALL(sys_send)
+/* 290 */	CALL(ABI(sys_sendto, sys_oabi_sendto))
+		CALL(sys_recv)
+		CALL(sys_recvfrom)
+		CALL(sys_shutdown)
+		CALL(sys_setsockopt)
+/* 295 */	CALL(sys_getsockopt)
+		CALL(ABI(sys_sendmsg, sys_oabi_sendmsg))
+		CALL(sys_recvmsg)
+		CALL(ABI(sys_semop, sys_oabi_semop))
+		CALL(sys_semget)
+/* 300 */	CALL(sys_semctl)
+		CALL(sys_msgsnd)
+		CALL(sys_msgrcv)
+		CALL(sys_msgget)
+		CALL(sys_msgctl)
+/* 305 */	CALL(sys_shmat)
+		CALL(sys_shmdt)
+		CALL(sys_shmget)
+		CALL(sys_shmctl)
+		CALL(sys_add_key)
+/* 310 */	CALL(sys_request_key)
+		CALL(sys_keyctl)
+		CALL(ABI(sys_semtimedop, sys_oabi_semtimedop))
+/* vserver */	CALL(sys_ni_syscall)
+		CALL(sys_ioprio_set)
+/* 315 */	CALL(sys_ioprio_get)
+		CALL(sys_inotify_init)
+		CALL(sys_inotify_add_watch)
+		CALL(sys_inotify_rm_watch)
+		CALL(sys_mbind)
+/* 320 */	CALL(sys_get_mempolicy)
+		CALL(sys_set_mempolicy)
+		CALL(sys_openat)
+		CALL(sys_mkdirat)
+		CALL(sys_mknodat)
+/* 325 */	CALL(sys_fchownat)
+		CALL(sys_futimesat)
+		CALL(ABI(sys_fstatat64,  sys_oabi_fstatat64))
+		CALL(sys_unlinkat)
+		CALL(sys_renameat)
+/* 330 */	CALL(sys_linkat)
+		CALL(sys_symlinkat)
+		CALL(sys_readlinkat)
+		CALL(sys_fchmodat)
+		CALL(sys_faccessat)
+/* 335 */	CALL(sys_pselect6)
+		CALL(sys_ppoll)
+		CALL(sys_unshare)
+		CALL(sys_set_robust_list)
+		CALL(sys_get_robust_list)
+/* 340 */	CALL(sys_splice)
+		CALL(sys_sync_file_range2)
+		CALL(sys_tee)
+		CALL(sys_vmsplice)
+		CALL(sys_move_pages)
+/* 345 */	CALL(sys_getcpu)
+		CALL(sys_epoll_pwait)
+		CALL(sys_kexec_load)
+		CALL(sys_utimensat)
+		CALL(sys_signalfd)
+/* 350 */	CALL(sys_timerfd_create)
+		CALL(sys_eventfd)
+		CALL(sys_fallocate)
+		CALL(sys_timerfd_settime)
+		CALL(sys_timerfd_gettime)
+/* 355 */	CALL(sys_signalfd4)
+		CALL(sys_eventfd2)
+		CALL(sys_epoll_create1)
+		CALL(sys_dup3)
+		CALL(sys_pipe2)
+/* 360 */	CALL(sys_inotify_init1)
+		CALL(sys_preadv)
+		CALL(sys_pwritev)
+		CALL(sys_rt_tgsigqueueinfo)
+		CALL(sys_perf_event_open)
+/* 365 */	CALL(sys_recvmmsg)
+		CALL(sys_accept4)
+		CALL(sys_fanotify_init)
+		CALL(sys_fanotify_mark)
+		CALL(sys_prlimit64)
+/* 370 */	CALL(sys_name_to_handle_at)
+		CALL(sys_open_by_handle_at)
+		CALL(sys_clock_adjtime)
+		CALL(sys_syncfs)
+		CALL(sys_sendmmsg)
+/* 375 */	CALL(sys_setns)
+		CALL(sys_process_vm_readv)
+		CALL(sys_process_vm_writev)
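+/* Vendor-specific syscalls below (378 onwards); these are carried in
+   this tree only and do not exist in mainline 3.4. */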
+/* 378 */	CALL(sys_get_lcdinfo)
+		CALL(sys_get_wifiinfo)
+		CALL(sys_get_flashinfo)
+		CALL(sys_get_cpuinfo)
+		CALL(sys_get_ddrinfo)
+		CALL(sys_set_cidstate)
+		CALL(sys_get_cidstate)
+		CALL(sys_jffs2_quick_gc_wait_done)
+		CALL(sys_get_ddrtestinfo)
+		CALL(sys_set_pdp_state)
+		CALL(sys_get_ipv6_prefix)
+		CALL(sys_set_xlat)
+
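+/*
+ * Pad the table out to a multiple of four entries; every spare slot
+ * falls through to sys_ni_syscall.
+ */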
+#ifndef syscalls_counted
+.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
+#define syscalls_counted
+#endif
+.rept syscalls_padding
+		CALL(sys_ni_syscall)
+.endr
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/compat.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/compat.c
new file mode 100644
index 0000000..9256523
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/compat.c
@@ -0,0 +1,220 @@
+/*
+ *  linux/arch/arm/kernel/compat.c
+ *
+ *  Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * We keep the old params compatibility cruft in one place (here)
+ * so we don't end up with a mess scattered around other places.
+ *
+ * NOTE:
+ *  The old struct param_struct is deprecated, but it will be kept in
+ *  the kernel for 5 years from now (2001). This will allow boot loaders
+ *  to convert to the new struct tag way.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/page.h>
+
+#include <asm/mach/arch.h>
+
+#include "compat.h"
+
+/*
+ * Usage:
+ *  - do not go blindly adding fields, add them at the end
+ *  - when adding fields, don't rely on the address until
+ *    a patch from me has been released
+ *  - unused fields should be zero (for future expansion)
+ *  - this structure is relatively short-lived - only
+ *    guaranteed to contain useful data in setup_arch()
+ *
+ * This is the old deprecated way to pass parameters to the kernel
+ */
+struct param_struct {
+    union {
+	struct {
+	    unsigned long page_size;		/*  0 */
+	    unsigned long nr_pages;		/*  4 */
+	    unsigned long ramdisk_size;		/*  8 */
+	    unsigned long flags;		/* 12 */
+#define FLAG_READONLY	1
+#define FLAG_RDLOAD	4
+#define FLAG_RDPROMPT	8
+	    unsigned long rootdev;		/* 16 */
+	    unsigned long video_num_cols;	/* 20 */
+	    unsigned long video_num_rows;	/* 24 */
+	    unsigned long video_x;		/* 28 */
+	    unsigned long video_y;		/* 32 */
+	    unsigned long memc_control_reg;	/* 36 */
+	    unsigned char sounddefault;		/* 40 */
+	    unsigned char adfsdrives;		/* 41 */
+	    unsigned char bytes_per_char_h;	/* 42 */
+	    unsigned char bytes_per_char_v;	/* 43 */
+	    unsigned long pages_in_bank[4];	/* 44 */
+	    unsigned long pages_in_vram;	/* 60 */
+	    unsigned long initrd_start;		/* 64 */
+	    unsigned long initrd_size;		/* 68 */
+	    unsigned long rd_start;		/* 72 */
+	    unsigned long system_rev;		/* 76 */
+	    unsigned long system_serial_low;	/* 80 */
+	    unsigned long system_serial_high;	/* 84 */
+	    unsigned long mem_fclk_21285;       /* 88 */
+	} s;
+	char unused[256];
+    } u1;
+    union {
+	char paths[8][128];
+	struct {
+	    unsigned long magic;
+	    char n[1024 - sizeof(unsigned long)];
+	} s;
+    } u2;
+    char commandline[COMMAND_LINE_SIZE];
+};
+
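+/* Append an ATAG_MEM entry describing one bank of memory at @start. */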
+static struct tag * __init memtag(struct tag *tag, unsigned long start, unsigned long size)
+{
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_MEM;
+	tag->hdr.size = tag_size(tag_mem32);
+	tag->u.mem.size = size;
+	tag->u.mem.start = start;
+
+	return tag;
+}
+
+static void __init build_tag_list(struct param_struct *params, void *taglist)
+{
+	struct tag *tag = taglist;
+
+	if (params->u1.s.page_size != PAGE_SIZE) {
+		printk(KERN_WARNING "Warning: bad configuration page, "
+		       "trying to continue\n");
+		return;
+	}
+
+	printk(KERN_DEBUG "Converting old-style param struct to taglist\n");
+
+#ifdef CONFIG_ARCH_NETWINDER
+	if (params->u1.s.nr_pages != 0x02000 &&
+	    params->u1.s.nr_pages != 0x04000 &&
+	    params->u1.s.nr_pages != 0x08000 &&
+	    params->u1.s.nr_pages != 0x10000) {
+		printk(KERN_WARNING "Warning: bad NeTTrom parameters "
+		       "detected, using defaults\n");
+
+		params->u1.s.nr_pages = 0x1000;	/* 16MB */
+		params->u1.s.ramdisk_size = 0;
+		params->u1.s.flags = FLAG_READONLY;
+		params->u1.s.initrd_start = 0;
+		params->u1.s.initrd_size = 0;
+		params->u1.s.rd_start = 0;
+	}
+#endif
+
+	tag->hdr.tag  = ATAG_CORE;
+	tag->hdr.size = tag_size(tag_core);
+	tag->u.core.flags = params->u1.s.flags & FLAG_READONLY;
+	tag->u.core.pagesize = params->u1.s.page_size;
+	tag->u.core.rootdev = params->u1.s.rootdev;
+
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_RAMDISK;
+	tag->hdr.size = tag_size(tag_ramdisk);
+	tag->u.ramdisk.flags = (params->u1.s.flags & FLAG_RDLOAD ? 1 : 0) |
+			       (params->u1.s.flags & FLAG_RDPROMPT ? 2 : 0);
+	tag->u.ramdisk.size  = params->u1.s.ramdisk_size;
+	tag->u.ramdisk.start = params->u1.s.rd_start;
+
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_INITRD;
+	tag->hdr.size = tag_size(tag_initrd);
+	tag->u.initrd.start = params->u1.s.initrd_start;
+	tag->u.initrd.size  = params->u1.s.initrd_size;
+
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_SERIAL;
+	tag->hdr.size = tag_size(tag_serialnr);
+	tag->u.serialnr.low = params->u1.s.system_serial_low;
+	tag->u.serialnr.high = params->u1.s.system_serial_high;
+
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_REVISION;
+	tag->hdr.size = tag_size(tag_revision);
+	tag->u.revision.rev = params->u1.s.system_rev;
+
+#ifdef CONFIG_ARCH_ACORN
+	if (machine_is_riscpc()) {
+		int i;
+		for (i = 0; i < 4; i++)
+			tag = memtag(tag, PHYS_OFFSET + (i << 26),
+				 params->u1.s.pages_in_bank[i] * PAGE_SIZE);
+	} else
+#endif
+	tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);
+
+#ifdef CONFIG_FOOTBRIDGE
+	if (params->u1.s.mem_fclk_21285) {
+		tag = tag_next(tag);
+		tag->hdr.tag = ATAG_MEMCLK;
+		tag->hdr.size = tag_size(tag_memclk);
+		tag->u.memclk.fmemclk = params->u1.s.mem_fclk_21285;
+	}
+#endif
+
+#ifdef CONFIG_ARCH_EBSA285
+	if (machine_is_ebsa285()) {
+		tag = tag_next(tag);
+		tag->hdr.tag = ATAG_VIDEOTEXT;
+		tag->hdr.size = tag_size(tag_videotext);
+		tag->u.videotext.x            = params->u1.s.video_x;
+		tag->u.videotext.y            = params->u1.s.video_y;
+		tag->u.videotext.video_page   = 0;
+		tag->u.videotext.video_mode   = 0;
+		tag->u.videotext.video_cols   = params->u1.s.video_num_cols;
+		tag->u.videotext.video_ega_bx = 0;
+		tag->u.videotext.video_lines  = params->u1.s.video_num_rows;
+		tag->u.videotext.video_isvga  = 1;
+		tag->u.videotext.video_points = 8;
+	}
+#endif
+
+#ifdef CONFIG_ARCH_ACORN
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_ACORN;
+	tag->hdr.size = tag_size(tag_acorn);
+	tag->u.acorn.memc_control_reg = params->u1.s.memc_control_reg;
+	tag->u.acorn.vram_pages       = params->u1.s.pages_in_vram;
+	tag->u.acorn.sounddefault     = params->u1.s.sounddefault;
+	tag->u.acorn.adfsdrives       = params->u1.s.adfsdrives;
+#endif
+
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_CMDLINE;
+	tag->hdr.size = (strlen(params->commandline) + 3 +
+			 sizeof(struct tag_header)) >> 2;
+	strcpy(tag->u.cmdline.cmdline, params->commandline);
+
+	tag = tag_next(tag);
+	tag->hdr.tag = ATAG_NONE;
+	tag->hdr.size = 0;
+
+	memmove(params, taglist, ((int)tag) - ((int)taglist) +
+				 sizeof(struct tag_header));
+}
+
+void __init convert_to_tag_list(struct tag *tags)
+{
+	struct param_struct *params = (struct param_struct *)tags;
+	build_tag_list(params, &params->u2);
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/compat.h b/ap/os/linux/linux-3.4.x/arch/arm/kernel/compat.h
new file mode 100644
index 0000000..39264ab
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/compat.h
@@ -0,0 +1,11 @@
+/*
+ *  linux/arch/arm/kernel/compat.h
+ *
+ *  Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+extern void convert_to_tag_list(struct tag *tags);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/cpuidle.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/cpuidle.c
new file mode 100644
index 0000000..89545f6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/cpuidle.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/cpuidle.h>
+#include <asm/proc-fns.h>
+
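+/*
+ * Simplest possible idle state: run the CPU's idle hook (typically a
+ * WFI) and report the entered state index back to the cpuidle core.
+ */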
+int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
+{
+	cpu_do_idle();
+
+	return index;
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/crash_dump.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/crash_dump.c
new file mode 100644
index 0000000..5d1286d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/crash_dump.c
@@ -0,0 +1,57 @@
+/*
+ * arch/arm/kernel/crash_dump.c
+ *
+ * Copyright (C) 2010 Nokia Corporation.
+ * Author: Mika Westerberg
+ *
+ * This code is taken from arch/x86/kernel/crash_dump_64.c
+ *   Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ *   Copyright (C) IBM Corporation, 2004. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/crash_dump.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+/**
+ * copy_oldmem_page() - copy one page from old kernel memory
+ * @pfn: page frame number to be copied
+ * @buf: buffer where the copied page is placed
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page
+ * @userbuf: if set, @buf is in the user address space
+ *
+ * This function copies one page from old kernel memory into buffer pointed by
+ * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
+ * copied or negative error in case of failure.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+			 size_t csize, unsigned long offset,
+			 int userbuf)
+{
+	void *vaddr;
+
+	if (!csize)
+		return 0;
+
+	vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
+	if (!vaddr)
+		return -ENOMEM;
+
+	if (userbuf) {
+		if (copy_to_user(buf, vaddr + offset, csize)) {
+			iounmap(vaddr);
+			return -EFAULT;
+		}
+	} else {
+		memcpy(buf, vaddr + offset, csize);
+	}
+
+	iounmap(vaddr);
+	return csize;
+}
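+
+/*
+ * Usage sketch (hypothetical caller): the generic vmcore code reads the
+ * crashed kernel's memory a page at a time, roughly:
+ *
+ *	ssize_t n = copy_oldmem_page(pfn, kbuf, PAGE_SIZE, 0, 0);
+ *
+ * with @userbuf set to 1 when the buffer is a user-space pointer.
+ * ioremap() is needed because the old kernel's pages are not part of
+ * this kernel's linear mapping.
+ */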
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/debug.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/debug.S
new file mode 100644
index 0000000..c45522c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/debug.S
@@ -0,0 +1,203 @@
+/*
+ *  linux/arch/arm/kernel/debug.S
+ *
+ *  Copyright (C) 1994-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  32-bit debugging code
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+		.text
+
+/*
+ * Some debugging routines (useful if you've got MM problems and
+ * printk isn't working).  For DEBUGGING ONLY!!!  Do not leave
+ * references to these in a production kernel!
+ */
+
+#if defined(CONFIG_DEBUG_ICEDCC)
+		@@ debug using ARM EmbeddedICE DCC channel
+
+		.macro	addruart, rp, rv, tmp
+		.endm
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
+
+		.macro	senduart, rd, rx
+		mcr	p14, 0, \rd, c0, c5, 0
+		.endm
+
+		.macro	busyuart, rd, rx
+1001:
+		mrc	p14, 0, \rx, c0, c1, 0
+		tst	\rx, #0x20000000
+		beq	1001b
+		.endm
+
+		.macro	waituart, rd, rx
+		mov	\rd, #0x2000000
+1001:
+		subs	\rd, \rd, #1
+		bmi	1002f
+		mrc	p14, 0, \rx, c0, c1, 0
+		tst	\rx, #0x20000000
+		bne	1001b
+1002:
+		.endm
+
+#elif defined(CONFIG_CPU_XSCALE)
+
+		.macro	senduart, rd, rx
+		mcr	p14, 0, \rd, c8, c0, 0
+		.endm
+
+		.macro	busyuart, rd, rx
+1001:
+		mrc	p14, 0, \rx, c14, c0, 0
+		tst	\rx, #0x10000000
+		beq	1001b
+		.endm
+
+		.macro	waituart, rd, rx
+		mov	\rd, #0x10000000
+1001:
+		subs	\rd, \rd, #1
+		bmi	1002f
+		mrc	p14, 0, \rx, c14, c0, 0
+		tst	\rx, #0x10000000
+		bne	1001b
+1002:
+		.endm
+
+#else
+
+		.macro	senduart, rd, rx
+		mcr	p14, 0, \rd, c1, c0, 0
+		.endm
+
+		.macro	busyuart, rd, rx
+1001:
+		mrc	p14, 0, \rx, c0, c0, 0
+		tst	\rx, #2
+		beq	1001b
+
+		.endm
+
+		.macro	waituart, rd, rx
+		mov	\rd, #0x2000000
+1001:
+		subs	\rd, \rd, #1
+		bmi	1002f
+		mrc	p14, 0, \rx, c0, c0, 0
+		tst	\rx, #2
+		bne	1001b
+1002:
+		.endm
+
+#endif	/* CONFIG_CPU_V6 */
+
+#elif !defined(CONFIG_DEBUG_SEMIHOSTING)
+#include <mach/debug-macro.S>
+#endif	/* CONFIG_DEBUG_ICEDCC */
+
+#ifdef CONFIG_MMU
+		.macro	addruart_current, rx, tmp1, tmp2
+		addruart	\tmp1, \tmp2, \rx
+		mrc		p15, 0, \rx, c1, c0
+		tst		\rx, #1
+		moveq		\rx, \tmp1
+		movne		\rx, \tmp2
+		.endm
+
+#else /* !CONFIG_MMU */
+		.macro	addruart_current, rx, tmp1, tmp2
+		addruart	\rx, \tmp1
+		.endm
+
+#endif /* CONFIG_MMU */
+
+/*
+ * Useful debugging routines
+ */
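+@ printhex8/4/2 print the low 8/4/2 hex digits of r0.  The digits are
+@ stored back-to-front into hexbuf (NUL-terminated), then the buffer is
+@ handed to printascii.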
+ENTRY(printhex8)
+		mov	r1, #8
+		b	printhex
+ENDPROC(printhex8)
+
+ENTRY(printhex4)
+		mov	r1, #4
+		b	printhex
+ENDPROC(printhex4)
+
+ENTRY(printhex2)
+		mov	r1, #2
+printhex:	adr	r2, hexbuf
+		add	r3, r2, r1
+		mov	r1, #0
+		strb	r1, [r3]
+1:		and	r1, r0, #15
+		mov	r0, r0, lsr #4
+		cmp	r1, #10
+		addlt	r1, r1, #'0'
+		addge	r1, r1, #'a' - 10
+		strb	r1, [r3, #-1]!
+		teq	r3, r2
+		bne	1b
+		mov	r0, r2
+		b	printascii
+ENDPROC(printhex2)
+
+hexbuf:		.space 16
+
+		.ltorg
+
+#ifndef CONFIG_DEBUG_SEMIHOSTING
+
+ENTRY(printascii)
+		addruart_current r3, r1, r2
+		b	2f
+1:		waituart r2, r3
+		senduart r1, r3
+		busyuart r2, r3
+		teq	r1, #'\n'
+		moveq	r1, #'\r'
+		beq	1b
+2:		teq	r0, #0
+		ldrneb	r1, [r0], #1
+		teqne	r1, #0
+		bne	1b
+		mov	pc, lr
+ENDPROC(printascii)
+
+ENTRY(printch)
+		addruart_current r3, r1, r2
+		mov	r1, r0
+		mov	r0, #0
+		b	1b
+ENDPROC(printch)
+
+#else
+
+ENTRY(printascii)
+		mov	r1, r0
+		mov	r0, #0x04		@ SYS_WRITE0
+	ARM(	svc	#0x123456	)
+	THUMB(	svc	#0xab		)
+		mov	pc, lr
+ENDPROC(printascii)
+
+ENTRY(printch)
+		adr	r1, hexbuf
+		strb	r0, [r1]
+		mov	r0, #0x03		@ SYS_WRITEC
+	ARM(	svc	#0x123456	)
+	THUMB(	svc	#0xab		)
+		mov	pc, lr
+ENDPROC(printch)
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/devtree.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/devtree.c
new file mode 100644
index 0000000..bee7f9d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/devtree.c
@@ -0,0 +1,134 @@
+/*
+ *  linux/arch/arm/kernel/devtree.c
+ *
+ *  Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+	arm_add_memory(base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+	return alloc_bootmem_align(size, align);
+}
+
+void __init arm_dt_memblock_reserve(void)
+{
+	u64 *reserve_map, base, size;
+
+	if (!initial_boot_params)
+		return;
+
+	/* Reserve the dtb region */
+	memblock_reserve(virt_to_phys(initial_boot_params),
+			 be32_to_cpu(initial_boot_params->totalsize));
+
+	/*
+	 * Process the reserve map.  This will probably overlap the initrd
+	 * and dtb locations, which are already reserved, but overlapping
+	 * doesn't hurt anything.
+	 */
+	reserve_map = ((void*)initial_boot_params) +
+			be32_to_cpu(initial_boot_params->off_mem_rsvmap);
+	while (1) {
+		base = be64_to_cpup(reserve_map++);
+		size = be64_to_cpup(reserve_map++);
+		if (!size)
+			break;
+		memblock_reserve(base, size);
+	}
+}
+
+/**
+ * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
+ * @dt_phys: physical address of dt blob
+ *
+ * If a dtb was passed to the kernel in r2, then use it to choose the
+ * correct machine_desc and to setup the system.
+ */
+struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
+{
+	struct boot_param_header *devtree;
+	struct machine_desc *mdesc, *mdesc_best = NULL;
+	unsigned int score, mdesc_score = ~1;
+	unsigned long dt_root;
+	const char *model;
+
+	if (!dt_phys)
+		return NULL;
+
+	devtree = phys_to_virt(dt_phys);
+
+	/* check device tree validity */
+	if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+		return NULL;
+
+	/* Search the mdescs for the 'best' compatible value match */
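+	/*
+	 * of_flat_dt_match() scores by the 1-based position of the match
+	 * in the root node's "compatible" list, so a lower non-zero score
+	 * is a more specific match.
+	 */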
+	initial_boot_params = devtree;
+	dt_root = of_get_flat_dt_root();
+	for_each_machine_desc(mdesc) {
+		score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+		if (score > 0 && score < mdesc_score) {
+			mdesc_best = mdesc;
+			mdesc_score = score;
+		}
+	}
+	if (!mdesc_best) {
+		const char *prop;
+		long size;
+
+		early_print("\nError: unrecognized/unsupported "
+			    "device tree compatible list:\n[ ");
+
+		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+		while (size > 0) {
+			early_print("'%s' ", prop);
+			size -= strlen(prop) + 1;
+			prop += strlen(prop) + 1;
+		}
+		early_print("]\n\n");
+
+		dump_machine_table(); /* does not return */
+	}
+
+	model = of_get_flat_dt_prop(dt_root, "model", NULL);
+	if (!model)
+		model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
+	if (!model)
+		model = "<unknown>";
+	pr_info("Machine: %s, model: %s\n", mdesc_best->name, model);
+
+	/* Retrieve various information from the /chosen node */
+	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+	/* Initialize {size,address}-cells info */
+	of_scan_flat_dt(early_init_dt_scan_root, NULL);
+	/* Setup memory, calling early_init_dt_add_memory_arch */
+	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+	/* Change machine number to match the mdesc we're using */
+	__machine_arch_type = mdesc_best->nr;
+
+	return mdesc_best;
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/dma-isa.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/dma-isa.c
new file mode 100644
index 0000000..360bb6d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/dma-isa.c
@@ -0,0 +1,222 @@
+/*
+ *  linux/arch/arm/kernel/dma-isa.c
+ *
+ *  Copyright (C) 1999-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  ISA DMA primitives
+ *  Taken from various sources, including:
+ *   linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ *     Written by Hennus Bergman, 1992.
+ *     High DMA channel support & info by Hannu Savolainen and John Boyd,
+ *     Nov. 1992.
+ *   arch/arm/kernel/dma-ebsa285.c
+ *   Copyright (C) 1998 Phil Blundell
+ */
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include <asm/dma.h>
+#include <asm/mach/dma.h>
+
+#define ISA_DMA_MASK		0
+#define ISA_DMA_MODE		1
+#define ISA_DMA_CLRFF		2
+#define ISA_DMA_PGHI		3
+#define ISA_DMA_PGLO		4
+#define ISA_DMA_ADDR		5
+#define ISA_DMA_COUNT		6
+
+static unsigned int isa_dma_port[8][7] = {
+	/* MASK   MODE   CLRFF  PAGE_HI PAGE_LO ADDR COUNT */
+	{  0x0a,  0x0b,  0x0c,  0x487,  0x087,  0x00, 0x01 },
+	{  0x0a,  0x0b,  0x0c,  0x483,  0x083,  0x02, 0x03 },
+	{  0x0a,  0x0b,  0x0c,  0x481,  0x081,  0x04, 0x05 },
+	{  0x0a,  0x0b,  0x0c,  0x482,  0x082,  0x06, 0x07 },
+	{  0xd4,  0xd6,  0xd8,  0x000,  0x000,  0xc0, 0xc2 },
+	{  0xd4,  0xd6,  0xd8,  0x48b,  0x08b,  0xc4, 0xc6 },
+	{  0xd4,  0xd6,  0xd8,  0x489,  0x089,  0xc8, 0xca },
+	{  0xd4,  0xd6,  0xd8,  0x48a,  0x08a,  0xcc, 0xce }
+};
+
+static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
+{
+	unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT];
+	int count;
+
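+	/*
+	 * The 8237 count register is read twice, low byte then high byte.
+	 * It holds (transfers remaining - 1), hence the "1 +"; channels
+	 * 4-7 count 16-bit words, so their byte residue is doubled below.
+	 */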
+	count = 1 + inb(io_port);
+	count |= inb(io_port) << 8;
+
+	return chan < 4 ? count : (count << 1);
+}
+
+static void isa_enable_dma(unsigned int chan, dma_t *dma)
+{
+	if (dma->invalid) {
+		unsigned long address, length;
+		unsigned int mode;
+		enum dma_data_direction direction;
+
+		mode = (chan & 3) | dma->dma_mode;
+		switch (dma->dma_mode & DMA_MODE_MASK) {
+		case DMA_MODE_READ:
+			direction = DMA_FROM_DEVICE;
+			break;
+
+		case DMA_MODE_WRITE:
+			direction = DMA_TO_DEVICE;
+			break;
+
+		case DMA_MODE_CASCADE:
+			direction = DMA_BIDIRECTIONAL;
+			break;
+
+		default:
+			direction = DMA_NONE;
+			break;
+		}
+
+		if (!dma->sg) {
+			/*
+			 * Cope with ISA-style drivers which expect cache
+			 * coherence.
+			 */
+			dma->sg = &dma->buf;
+			dma->sgcount = 1;
+			dma->buf.length = dma->count;
+			dma->buf.dma_address = dma_map_single(NULL,
+				dma->addr, dma->count,
+				direction);
+		}
+
+		address = dma->buf.dma_address;
+		length  = dma->buf.length - 1;
+
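+		/*
+		 * Program the DMA page registers with the upper address
+		 * bits (bits 16-23 in the low page, 24-31 in the high
+		 * page), then the base address and count.  Channels 4-7
+		 * transfer 16-bit words, so their address and count are
+		 * programmed in words (halved below).
+		 */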
+		outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]);
+		outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]);
+
+		if (chan >= 4) {
+			address >>= 1;
+			length >>= 1;
+		}
+
+		outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]);
+
+		outb(address, isa_dma_port[chan][ISA_DMA_ADDR]);
+		outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]);
+
+		outb(length, isa_dma_port[chan][ISA_DMA_COUNT]);
+		outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]);
+
+		outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);
+		dma->invalid = 0;
+	}
+	outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]);
+}
+
+static void isa_disable_dma(unsigned int chan, dma_t *dma)
+{
+	outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]);
+}
+
+static struct dma_ops isa_dma_ops = {
+	.type		= "ISA",
+	.enable		= isa_enable_dma,
+	.disable	= isa_disable_dma,
+	.residue	= isa_get_dma_residue,
+};
+
+static struct resource dma_resources[] = { {
+	.name	= "dma1",
+	.start	= 0x0000,
+	.end	= 0x000f
+}, {
+	.name	= "dma low page",
+	.start	= 0x0080,
+	.end 	= 0x008f
+}, {
+	.name	= "dma2",
+	.start	= 0x00c0,
+	.end	= 0x00df
+}, {
+	.name	= "dma high page",
+	.start	= 0x0480,
+	.end	= 0x048f
+} };
+
+static dma_t isa_dma[8];
+
+/*
+ * ISA DMA always starts at channel 0
+ */
+void __init isa_init_dma(void)
+{
+	/*
+	 * Try to autodetect presence of an ISA DMA controller.
+	 * We do some minimal initialisation, and check that
+	 * channel 0's DMA address registers are writeable.
+	 */
+	outb(0xff, 0x0d);
+	outb(0xff, 0xda);
+
+	/*
+	 * Write high and low address, and then read them back
+	 * in the same order.
+	 */
+	outb(0x55, 0x00);
+	outb(0xaa, 0x00);
+
+	if (inb(0) == 0x55 && inb(0) == 0xaa) {
+		unsigned int chan, i;
+
+		for (chan = 0; chan < 8; chan++) {
+			isa_dma[chan].d_ops = &isa_dma_ops;
+			isa_disable_dma(chan, NULL);
+		}
+
+		outb(0x40, 0x0b);
+		outb(0x41, 0x0b);
+		outb(0x42, 0x0b);
+		outb(0x43, 0x0b);
+
+		outb(0xc0, 0xd6);
+		outb(0x41, 0xd6);
+		outb(0x42, 0xd6);
+		outb(0x43, 0xd6);
+
+		outb(0, 0xd4);
+
+		outb(0x10, 0x08);
+		outb(0x10, 0xd0);
+
+		/*
+		 * Is this correct?  According to my documentation, it
+		 * doesn't appear to be.  It should be:
+		 *  outb(0x3f, 0x40b); outb(0x3f, 0x4d6);
+		 */
+		outb(0x30, 0x40b);
+		outb(0x31, 0x40b);
+		outb(0x32, 0x40b);
+		outb(0x33, 0x40b);
+		outb(0x31, 0x4d6);
+		outb(0x32, 0x4d6);
+		outb(0x33, 0x4d6);
+
+		for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
+			request_resource(&ioport_resource, dma_resources + i);
+
+		for (chan = 0; chan < 8; chan++) {
+			int ret = isa_dma_add(chan, &isa_dma[chan]);
+			if (ret)
+				printk(KERN_ERR "ISADMA%u: unable to register: %d\n",
+					chan, ret);
+		}
+
+		request_dma(DMA_ISA_CASCADE, "cascade");
+	}
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/dma.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/dma.c
new file mode 100644
index 0000000..7b829d9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/dma.c
@@ -0,0 +1,302 @@
+/*
+ *  linux/arch/arm/kernel/dma.c
+ *
+ *  Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Front-end to the DMA handling.  This handles the allocation/freeing
+ *  of DMA channels, and provides a unified interface to the machine's
+ *  DMA facilities.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+
+#include <asm/dma.h>
+
+#include <asm/mach/dma.h>
+
+DEFINE_RAW_SPINLOCK(dma_spin_lock);
+EXPORT_SYMBOL(dma_spin_lock);
+
+static dma_t *dma_chan[MAX_DMA_CHANNELS];
+
+static inline dma_t *dma_channel(unsigned int chan)
+{
+	if (chan >= MAX_DMA_CHANNELS)
+		return NULL;
+
+	return dma_chan[chan];
+}
+
+int __init isa_dma_add(unsigned int chan, dma_t *dma)
+{
+	if (!dma->d_ops)
+		return -EINVAL;
+
+	sg_init_table(&dma->buf, 1);
+
+	if (dma_chan[chan])
+		return -EBUSY;
+	dma_chan[chan] = dma;
+	return 0;
+}
+
+/*
+ * Request DMA channel
+ *
+ * On certain platforms, we have to allocate an interrupt as well...
+ */
+int request_dma(unsigned int chan, const char *device_id)
+{
+	dma_t *dma = dma_channel(chan);
+	int ret;
+
+	if (!dma)
+		goto bad_dma;
+
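+	/*
+	 * Claim the channel atomically: xchg() returns the previous lock
+	 * value, so a non-zero result means somebody else already owns it.
+	 */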
+	if (xchg(&dma->lock, 1) != 0)
+		goto busy;
+
+	dma->device_id = device_id;
+	dma->active    = 0;
+	dma->invalid   = 1;
+
+	ret = 0;
+	if (dma->d_ops->request)
+		ret = dma->d_ops->request(chan, dma);
+
+	if (ret)
+		xchg(&dma->lock, 0);
+
+	return ret;
+
+bad_dma:
+	printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan);
+	return -EINVAL;
+
+busy:
+	return -EBUSY;
+}
+EXPORT_SYMBOL(request_dma);
+
+/*
+ * Free DMA channel
+ *
+ * On certain platforms, we have to free the interrupt as well...
+ */
+void free_dma(unsigned int chan)
+{
+	dma_t *dma = dma_channel(chan);
+
+	if (!dma)
+		goto bad_dma;
+
+	if (dma->active) {
+		printk(KERN_ERR "dma%d: freeing active DMA\n", chan);
+		dma->d_ops->disable(chan, dma);
+		dma->active = 0;
+	}
+
+	if (xchg(&dma->lock, 0) != 0) {
+		if (dma->d_ops->free)
+			dma->d_ops->free(chan, dma);
+		return;
+	}
+
+	printk(KERN_ERR "dma%d: trying to free free DMA\n", chan);
+	return;
+
+bad_dma:
+	printk(KERN_ERR "dma: trying to free DMA%d\n", chan);
+}
+EXPORT_SYMBOL(free_dma);
+
+/* Set DMA Scatter-Gather list
+ */
+void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg)
+{
+	dma_t *dma = dma_channel(chan);
+
+	if (dma->active)
+		printk(KERN_ERR "dma%d: altering DMA SG while "
+		       "DMA active\n", chan);
+
+	dma->sg = sg;
+	dma->sgcount = nr_sg;
+	dma->invalid = 1;
+}
+EXPORT_SYMBOL(set_dma_sg);
+
+/* Set DMA address
+ *
+ * Copy address to the structure, and set the invalid bit
+ */
+void __set_dma_addr (unsigned int chan, void *addr)
+{
+	dma_t *dma = dma_channel(chan);
+
+	if (dma->active)
+		printk(KERN_ERR "dma%d: altering DMA address while "
+		       "DMA active\n", chan);
+
+	dma->sg = NULL;
+	dma->addr = addr;
+	dma->invalid = 1;
+}
+EXPORT_SYMBOL(__set_dma_addr);
+
+/* Set DMA byte count
+ *
+ * Copy address to the structure, and set the invalid bit
+ */
+void set_dma_count (unsigned int chan, unsigned long count)
+{
+	dma_t *dma = dma_channel(chan);
+
+	if (dma->active)
+		printk(KERN_ERR "dma%d: altering DMA count while "
+		       "DMA active\n", chan);
+
+	dma->sg = NULL;
+	dma->count = count;
+	dma->invalid = 1;
+}
+EXPORT_SYMBOL(set_dma_count);
+
+/* Set DMA direction mode
+ */
+void set_dma_mode (unsigned int chan, unsigned int mode)
+{
+	dma_t *dma = dma_channel(chan);
+
+	if (dma->active)
+		printk(KERN_ERR "dma%d: altering DMA mode while "
+		       "DMA active\n", chan);
+
+	dma->dma_mode = mode;
+	dma->invalid = 1;
+}
+EXPORT_SYMBOL(set_dma_mode);
+
+/* Enable DMA channel
+ */
+void enable_dma (unsigned int chan)
+{
+	dma_t *dma = dma_channel(chan);
+
+	if (!dma->lock)
+		goto free_dma;
+
+	if (dma->active == 0) {
+		dma->active = 1;
+		dma->d_ops->enable(chan, dma);
+	}
+	return;
+
+free_dma:
+	printk(KERN_ERR "dma%d: trying to enable free DMA\n", chan);
+	BUG();
+}
+EXPORT_SYMBOL(enable_dma);
+
+/* Disable DMA channel
+ */
+void disable_dma (unsigned int chan)
+{
+	dma_t *dma = dma_channel(chan);
+
+	if (!dma->lock)
+		goto free_dma;
+
+	if (dma->active == 1) {
+		dma->active = 0;
+		dma->d_ops->disable(chan, dma);
+	}
+	return;
+
+free_dma:
+	printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan);
+	BUG();
+}
+EXPORT_SYMBOL(disable_dma);
+
+/*
+ * Is the specified DMA channel active?
+ */
+int dma_channel_active(unsigned int chan)
+{
+	dma_t *dma = dma_channel(chan);
+	return dma->active;
+}
+EXPORT_SYMBOL(dma_channel_active);
+
+void set_dma_page(unsigned int chan, char pagenr)
+{
+	printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan);
+}
+EXPORT_SYMBOL(set_dma_page);
+
+void set_dma_speed(unsigned int chan, int cycle_ns)
+{
+	dma_t *dma = dma_channel(chan);
+	int ret = 0;
+
+	if (dma->d_ops->setspeed)
+		ret = dma->d_ops->setspeed(chan, dma, cycle_ns);
+	dma->speed = ret;
+}
+EXPORT_SYMBOL(set_dma_speed);
+
+int get_dma_residue(unsigned int chan)
+{
+	dma_t *dma = dma_channel(chan);
+	int ret = 0;
+
+	if (dma->d_ops->residue)
+		ret = dma->d_ops->residue(chan, dma);
+
+	return ret;
+}
+EXPORT_SYMBOL(get_dma_residue);
+
+#ifdef CONFIG_PROC_FS
+static int proc_dma_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
+		dma_t *dma = dma_channel(i);
+		if (dma && dma->lock)
+			seq_printf(m, "%2d: %s\n", i, dma->device_id);
+	}
+	return 0;
+}
+
+static int proc_dma_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_dma_show, NULL);
+}
+
+static const struct file_operations proc_dma_operations = {
+	.open		= proc_dma_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init proc_dma_init(void)
+{
+	proc_create("dma", 0, NULL, &proc_dma_operations);
+	return 0;
+}
+
+__initcall(proc_dma_init);
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/early_printk.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/early_printk.c
new file mode 100644
index 0000000..4307653
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/early_printk.c
@@ -0,0 +1,46 @@
+/*
+ *  linux/arch/arm/kernel/early_printk.c
+ *
+ *  Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+
+extern void printch(int);
+
+static void early_write(const char *s, unsigned n)
+{
+	while (n-- > 0) {
+		if (*s == '\n')
+			printch('\r');
+		printch(*s);
+		s++;
+	}
+}
+
+static void early_console_write(struct console *con, const char *s, unsigned n)
+{
+	early_write(s, n);
+}
+
+static struct console early_console_dev = {
+	.name =		"earlycon",
+	.write =	early_console_write,
+	.flags =	CON_PRINTBUFFER | CON_BOOT,
+	.index =	-1,
+};
+
+static int __init setup_early_printk(char *buf)
+{
+	early_console = &early_console_dev;
+	register_console(&early_console_dev);
+	return 0;
+}
+
+early_param("earlyprintk", setup_early_printk);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/elf.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/elf.c
new file mode 100644
index 0000000..d0d1e83
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/elf.c
@@ -0,0 +1,91 @@
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <asm/system_info.h>
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+	unsigned int eflags;
+
+	/* Make sure it's an ARM executable */
+	if (x->e_machine != EM_ARM)
+		return 0;
+
+	/* Make sure the entry address is reasonable */
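+	/*
+	 * Bit 0 of the entry point selects Thumb state, which requires
+	 * HWCAP_THUMB; an ARM entry point must be 4-byte aligned.
+	 */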
+	if (x->e_entry & 1) {
+		if (!(elf_hwcap & HWCAP_THUMB))
+			return 0;
+	} else if (x->e_entry & 3)
+		return 0;
+
+	eflags = x->e_flags;
+	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+		unsigned int flt_fmt;
+
+		/* APCS26 is only allowed if the CPU supports it */
+		if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT))
+			return 0;
+
+		flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);
+
+		/* VFP requires the supporting code */
+		if (flt_fmt == EF_ARM_VFP_FLOAT && !(elf_hwcap & HWCAP_VFP))
+			return 0;
+	}
+	return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
+
+void elf_set_personality(const struct elf32_hdr *x)
+{
+	unsigned int eflags = x->e_flags;
+	unsigned int personality = current->personality & ~PER_MASK;
+
+	/*
+	 * We only support Linux ELF executables, so always set the
+	 * personality to LINUX.
+	 */
+	personality |= PER_LINUX;
+
+	/*
+	 * APCS-26 is only valid for OABI executables
+	 */
+	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
+	    (eflags & EF_ARM_APCS_26))
+		personality &= ~ADDR_LIMIT_32BIT;
+	else
+		personality |= ADDR_LIMIT_32BIT;
+
+	set_personality(personality);
+
+	/*
+	 * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
+	 * and CP1, we only enable access to the iWMMXt coprocessor if the
+	 * binary is EABI or softfloat (and thus, guaranteed not to use
+	 * FPA instructions.)
+	 */
+	if (elf_hwcap & HWCAP_IWMMXT &&
+	    eflags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) {
+		set_thread_flag(TIF_USING_IWMMXT);
+	} else {
+		clear_thread_flag(TIF_USING_IWMMXT);
+	}
+}
+EXPORT_SYMBOL(elf_set_personality);
+
+/*
+ * Set READ_IMPLIES_EXEC if:
+ *  - the binary requires an executable stack
+ *  - we're running on a CPU which doesn't support NX.
+ */
+int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
+{
+	if (executable_stack != EXSTACK_DISABLE_X)
+		return 1;
+	if (cpu_architecture() < CPU_ARCH_ARMv6)
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(arm_elf_read_implies_exec);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/entry-armv.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/entry-armv.S
new file mode 100644
index 0000000..379b287
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/entry-armv.S
@@ -0,0 +1,1224 @@
+/*
+ *  linux/arch/arm/kernel/entry-armv.S
+ *
+ *  Copyright (C) 1996,1997,1998 Russell King.
+ *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
+ *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Low-level vector interface routines
+ *
+ *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
+ *  that causes it to save wrong values...  Be aware!
+ */
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
+#include <asm/vfpmacros.h>
+#ifndef CONFIG_MULTI_IRQ_HANDLER
+#include <mach/entry-macro.S>
+#endif
+#include <asm/thread_notify.h>
+#include <asm/unwind.h>
+#include <asm/unistd.h>
+#include <asm/tls.h>
+#include <asm/system_info.h>
+
+#include "entry-header.S"
+#include <asm/entry-macro-multi.S>
+
+#ifdef CONFIG_IRQ_STACK
+.globl  irq_handler_stack_top
+#endif
+
+/*
+ * Interrupt handling.
+ */
+	.macro	irq_handler
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	ldr	r1, =handle_arch_irq
+	mov	r0, sp
+	adr	lr, BSYM(9997f)
+	ldr	pc, [r1]
+#else
+	arch_irq_handler_default
+#endif
+9997:
+	.endm
+
+	.macro	pabt_helper
+	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+#ifdef MULTI_PABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
+#else
+	bl	CPU_PABORT_HANDLER
+#endif
+	.endm
+
+	.macro	dabt_helper
+
+	@
+	@ Call the processor-specific abort handler:
+	@
+	@  r2 - pt_regs
+	@  r4 - aborted context pc
+	@  r5 - aborted context psr
+	@
+	@ The abort handler must return the aborted address in r0, and
+	@ the fault status register in r1.  r9 must be preserved.
+	@
+#ifdef MULTI_DABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
+#else
+	bl	CPU_DABORT_HANDLER
+#endif
+	.endm
+
+#ifdef CONFIG_KPROBES
+	.section	.kprobes.text,"ax",%progbits
+#else
+	.text
+#endif
+
+/*
+ * Invalid mode handlers
+ */
+	.macro	inv_entry, reason
+	sub	sp, sp, #S_FRAME_SIZE
+ ARM(	stmib	sp, {r1 - lr}		)
+ THUMB(	stmia	sp, {r0 - r12}		)
+ THUMB(	str	sp, [sp, #S_SP]		)
+ THUMB(	str	lr, [sp, #S_LR]		)
+	mov	r1, #\reason
+	.endm
+
+__pabt_invalid:
+	inv_entry BAD_PREFETCH
+	b	common_invalid
+ENDPROC(__pabt_invalid)
+
+__dabt_invalid:
+	inv_entry BAD_DATA
+	b	common_invalid
+ENDPROC(__dabt_invalid)
+
+__irq_invalid:
+	inv_entry BAD_IRQ
+	b	common_invalid
+ENDPROC(__irq_invalid)
+
+__und_invalid:
+	inv_entry BAD_UNDEFINSTR
+
+	@
+	@ XXX fall through to common_invalid
+	@
+
+@
+@ common_invalid - generic code for failed exception (re-entrant version of handlers)
+@
+common_invalid:
+	zero_fp
+
+	ldmia	r0, {r4 - r6}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r7, #-1			@  ""   ""    ""        ""
+	str	r4, [sp]		@ save preserved r0
+	stmia	r0, {r5 - r7}		@ lr_<exception>,
+					@ cpsr_<exception>, "old_r0"
+
+	mov	r0, sp
+	b	bad_mode
+ENDPROC(__und_invalid)
+
+/*
+ * SVC mode handlers
+ */
+
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define SPFIX(code...) code
+#else
+#define SPFIX(code...)
+#endif
+
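+@ With EABI (v5+) the exception frame must be built on an 8-byte
+@ aligned stack.  The SPFIX() sequences below test the original
+@ alignment of sp and drop it by 4 when needed; on other
+@ configurations SPFIX() compiles away to nothing.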
+	.macro	svc_entry, stack_hole=0
+ UNWIND(.fnstart		)
+ UNWIND(.save {r0 - pc}		)
+	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX(	str	r0, [sp]	)	@ temporarily saved
+ SPFIX(	mov	r0, sp		)
+ SPFIX(	tst	r0, #4		)	@ test original stack alignment
+ SPFIX(	ldr	r0, [sp]	)	@ restored
+#else
+ SPFIX(	tst	sp, #4		)
+#endif
+ SPFIX(	subeq	sp, sp, #4	)
+	stmia	sp, {r1 - r12}
+
+	ldmia	r0, {r3 - r5}
+	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
+	mov	r6, #-1			@  ""  ""      ""       ""
+	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX(	addeq	r2, r2, #4	)
+	str	r3, [sp, #-4]!		@ save the "real" r0 copied
+					@ from the exception stack
+
+	mov	r3, lr
+
+	@
+	@ We are now ready to fill in the remaining blanks on the stack:
+	@
+	@  r2 - sp_svc
+	@  r3 - lr_svc
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
+	@
+	stmia	r7, {r2 - r6}
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
+	.endm
+
+	.align	5
+__dabt_svc:
+	svc_entry
+	mov	r2, sp
+	dabt_helper
+
+	@
+	@ IRQs off again before pulling preserved data off the stack
+	@
+	disable_irq_notrace
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
+ UNWIND(.fnend		)
+ENDPROC(__dabt_svc)
+
+	.align	5
+__irq_svc:
+	svc_entry
+#ifdef CONFIG_IRQ_STACK
+	mov	r6, sp
+	ldr	sp, =irq_handler_stack_top
+#endif
+	irq_handler
+#ifdef CONFIG_IRQ_STACK
+	mov	sp, r6
+#endif
+
+#ifdef CONFIG_PREEMPT
+	get_thread_info tsk
+	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
+	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
+	teq	r8, #0				@ if preempt count != 0
+	movne	r0, #0				@ force flags to 0
+	tst	r0, #_TIF_NEED_RESCHED
+	blne	svc_preempt
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	@ The parent context IRQs must have been enabled to get here in
+	@ the first place, so there's no point checking the PSR I bit.
+	bl	trace_hardirqs_on
+#endif
+	svc_exit r5				@ return from exception
+ UNWIND(.fnend		)
+ENDPROC(__irq_svc)
+
+	.ltorg
+
+#ifdef CONFIG_PREEMPT
+svc_preempt:
+	mov	r8, lr
+1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
+	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
+	tst	r0, #_TIF_NEED_RESCHED
+	moveq	pc, r8				@ go again
+	b	1b
+#endif
+
+__und_fault:
+	@ Correct the PC so that it points at the instruction which
+	@ caused the fault.  If the faulting instruction was ARM, the PC
+	@ will be pointing at the next instruction, so we have to
+	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
+	@ pointing at the second half of the Thumb instruction, so we
+	@ have to subtract 2.
+	ldr	r2, [r0, #S_PC]
+	sub	r2, r2, r1
+	str	r2, [r0, #S_PC]
+	b	do_undefinstr
+ENDPROC(__und_fault)
+
+	.align	5
+__und_svc:
+#ifdef CONFIG_KPROBES
+	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
+	@ it obviously needs free stack space which then will belong to
+	@ the saved context.
+	svc_entry 64
+#else
+	svc_entry
+#endif
+	@
+	@ call emulation code, which returns using r9 if it has emulated
+	@ the instruction, or the more conventional lr if we are to treat
+	@ this as a real undefined instruction
+	@
+	@  r0 - instruction
+	@
+#ifndef CONFIG_THUMB2_KERNEL
+	ldr	r0, [r4, #-4]
+#else
+	mov	r1, #2
+	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
+	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
+	blo	__und_svc_fault
+	ldrh	r9, [r4]			@ bottom 16 bits
+	add	r4, r4, #2
+	str	r4, [sp, #S_PC]
+	orr	r0, r9, r0, lsl #16
+#endif
+	adr	r9, BSYM(__und_svc_finish)
+	mov	r2, r4
+	bl	call_fpe
+
+	mov	r1, #4				@ PC correction to apply
+__und_svc_fault:
+	mov	r0, sp				@ struct pt_regs *regs
+	bl	__und_fault
+
+	@
+	@ IRQs off again before pulling preserved data off the stack
+	@
+__und_svc_finish:
+	disable_irq_notrace
+
+	@
+	@ restore SPSR and restart the instruction
+	@
+	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
+ UNWIND(.fnend		)
+ENDPROC(__und_svc)
+
+	.align	5
+__pabt_svc:
+	svc_entry
+	mov	r2, sp				@ regs
+	pabt_helper
+
+	@
+	@ IRQs off again before pulling preserved data off the stack
+	@
+	disable_irq_notrace
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
+ UNWIND(.fnend		)
+ENDPROC(__pabt_svc)
+
+	.align	5
+.LCcralign:
+	.word	cr_alignment
+#ifdef MULTI_DABORT
+.LCprocfns:
+	.word	processor
+#endif
+.LCfp:
+	.word	fp_enter
+
+/*
+ * User mode handlers
+ *
+ * EABI note: sp_svc is always 64-bit aligned here; S_FRAME_SIZE should be too
+ */
+
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
+#error "sizeof(struct pt_regs) must be a multiple of 8"
+#endif
+
+	.macro	usr_entry
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)	@ don't unwind the user space
+	sub	sp, sp, #S_FRAME_SIZE
+ ARM(	stmib	sp, {r1 - r12}	)
+ THUMB(	stmia	sp, {r0 - r12}	)
+
+	ldmia	r0, {r3 - r5}
+	add	r0, sp, #S_PC		@ here for interlock avoidance
+	mov	r6, #-1			@  ""  ""     ""        ""
+
+	str	r3, [sp]		@ save the "real" r0 copied
+					@ from the exception stack
+
+	@
+	@ We are now ready to fill in the remaining blanks on the stack:
+	@
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
+	@
+	@ Also, separately save sp_usr and lr_usr
+	@
+	stmia	r0, {r4 - r6}
+ ARM(	stmdb	r0, {sp, lr}^			)
+ THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
+
+	@
+	@ Enable the alignment trap while in kernel mode
+	@
+	alignment_trap r0
+
+	@
+	@ Clear FP to mark the first stack frame
+	@
+	zero_fp
+
+#ifdef CONFIG_IRQSOFF_TRACER
+	bl	trace_hardirqs_off
+#endif
+	.endm
+
+	.macro	kuser_cmpxchg_check
+#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#ifndef CONFIG_MMU
+#warning "NPTL on non MMU needs fixing"
+#else
+	@ Make sure our user space atomic helper is restarted
+	@ if it was interrupted in a critical region.  Here we
+	@ perform a quick test inline since it should be false
+	@ 99.9999% of the time.  The rest is done out of line.
+	cmp	r4, #TASK_SIZE
+	blhs	kuser_cmpxchg64_fixup
+#endif
+#endif
+	.endm
+
+	.align	5
+__dabt_usr:
+	usr_entry
+	kuser_cmpxchg_check
+	mov	r2, sp
+	dabt_helper
+	b	ret_from_exception
+ UNWIND(.fnend		)
+ENDPROC(__dabt_usr)
+
+	.align	5
+__irq_usr:
+	usr_entry
+	kuser_cmpxchg_check
+#ifdef CONFIG_IRQ_STACK
+	mov	r6, sp
+	ldr	sp, =irq_handler_stack_top
+#endif
+	irq_handler
+#ifdef CONFIG_IRQ_STACK
+	mov	sp, r6
+#endif
+
+	get_thread_info tsk
+	mov	why, #0
+	b	ret_to_user_from_irq
+ UNWIND(.fnend		)
+ENDPROC(__irq_usr)
+
+	.ltorg
+
+	.align	5
+__und_usr:
+	usr_entry
+
+	mov	r2, r4
+	mov	r3, r5
+
+	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
+	@      faulting instruction depending on Thumb mode.
+	@ r3 = regs->ARM_cpsr
+	@
+	@ The emulation code returns using r9 if it has emulated the
+	@ instruction, or the more conventional lr if we are to treat
+	@ this as a real undefined instruction
+	@
+	adr	r9, BSYM(ret_from_exception)
+
+	tst	r3, #PSR_T_BIT			@ Thumb mode?
+	bne	__und_usr_thumb
+	sub	r4, r2, #4			@ ARM instr at LR - 4
+1:	ldrt	r0, [r4]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	rev	r0, r0				@ little endian instruction
+#endif
+	@ r0 = 32-bit ARM instruction which caused the exception
+	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
+	@ r4 = PC value for the faulting instruction
+	@ lr = 32-bit undefined instruction function
+	adr	lr, BSYM(__und_usr_fault_32)
+	b	call_fpe
+
+__und_usr_thumb:
+	@ Thumb instruction
+	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+/*
+ * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
+ * can never be supported in a single kernel, this code is not applicable at
+ * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
+ * made about .arch directives.
+ */
+#if __LINUX_ARM_ARCH__ < 7
+/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
+#define NEED_CPU_ARCHITECTURE
+	ldr	r5, .LCcpu_architecture
+	ldr	r5, [r5]
+	cmp	r5, #CPU_ARCH_ARMv7
+	blo	__und_usr_fault_16		@ 16bit undefined instruction
+/*
+ * The following code won't get run unless the running CPU really is v7, so
+ * coding round the lack of ldrht on older arches is pointless.  Temporarily
+ * override the assembler target arch with the minimum required instead:
+ */
+	.arch	armv6t2
+#endif
+2:	ldrht	r5, [r4]
+	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
+	blo	__und_usr_fault_16		@ 16bit undefined instruction
+3:	ldrht	r0, [r2]
+	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
+	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
+	orr	r0, r0, r5, lsl #16
+	adr	lr, BSYM(__und_usr_fault_32)
+	@ r0 = the two 16-bit Thumb instructions which caused the exception
+	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
+	@ r4 = PC value for the first 16-bit Thumb instruction
+	@ lr = 32bit undefined instruction function
+
+#if __LINUX_ARM_ARCH__ < 7
+/* If the target arch was overridden, change it back: */
+#ifdef CONFIG_CPU_32v6K
+	.arch	armv6k
+#else
+	.arch	armv6
+#endif
+#endif /* __LINUX_ARM_ARCH__ < 7 */
+#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
+	b	__und_usr_fault_16
+#endif
+ UNWIND(.fnend)
+ENDPROC(__und_usr)
+
+/*
+ * The out of line fixup for the ldrt instructions above.
+ */
+	.pushsection .fixup, "ax"
+4:	mov	pc, r9
+	.popsection
+	.pushsection __ex_table,"a"
+	.long	1b, 4b
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+	.long	2b, 4b
+	.long	3b, 4b
+#endif
+	.popsection
+
+/*
+ * Check whether the instruction is a co-processor instruction.
+ * If yes, we need to call the relevant co-processor handler.
+ *
+ * Note that we don't do a full check here for the co-processor
+ * instructions; all instructions with bit 27 set are well
+ * defined.  The only instructions that should fault are the
+ * co-processor instructions.  However, we have to watch out
+ * for the ARM6/ARM7 SWI bug.
+ *
+ * NEON is a special case that has to be handled here. Not all
+ * NEON instructions are co-processor instructions, so we have
+ * to make a special case of checking for them. Plus, there's
+ * five groups of them, so we have a table of mask/opcode pairs
+ * to check against, and if any match then we branch off into the
+ * NEON handler code.
+ *
+ * Emulators may wish to make use of the following registers:
+ *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ *  r2  = PC value to resume execution after successful emulation
+ *  r9  = normal "successful" return address
+ *  r10 = this thread's thread_info structure
+ *  lr  = unrecognised instruction return address
+ * IRQs disabled, FIQs enabled.
+ */
+	@
+	@ Fall-through from Thumb-2 __und_usr
+	@
+#ifdef CONFIG_NEON
+	adr	r6, .LCneon_thumb_opcodes
+	b	2f
+#endif
+call_fpe:
+#ifdef CONFIG_NEON
+	adr	r6, .LCneon_arm_opcodes
+2:
+	ldr	r7, [r6], #4			@ mask value
+	cmp	r7, #0				@ end mask?
+	beq	1f
+	and	r8, r0, r7
+	ldr	r7, [r6], #4			@ opcode bits matching in mask
+	cmp	r8, r7				@ NEON instruction?
+	bne	2b
+	get_thread_info r10
+	mov	r7, #1
+	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
+	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
+	b	do_vfp				@ let VFP handler handle this
+1:
+#endif
+	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
+	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
+#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
+	and	r8, r0, #0x0f000000		@ mask out op-code bits
+	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
+#endif
+	moveq	pc, lr
+	get_thread_info r10			@ get current thread
+	and	r8, r0, #0x00000f00		@ mask out CP number
+ THUMB(	lsr	r8, r8, #8		)
+	mov	r7, #1
+	add	r6, r10, #TI_USED_CP
+ ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
+ THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
+#ifdef CONFIG_IWMMXT
+	@ Test if we need to give access to iWMMXt coprocessors
+	ldr	r5, [r10, #TI_FLAGS]
+	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
+	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
+	bcs	iwmmxt_task_enable
+#endif
+ ARM(	add	pc, pc, r8, lsr #6	)
+ THUMB(	lsl	r8, r8, #2		)
+ THUMB(	add	pc, r8			)
+	nop
+
+	movw_pc	lr				@ CP#0
+	W(b)	do_fpe				@ CP#1 (FPE)
+	W(b)	do_fpe				@ CP#2 (FPE)
+	movw_pc	lr				@ CP#3
+#ifdef CONFIG_CRUNCH
+	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
+	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
+	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
+#else
+	movw_pc	lr				@ CP#4
+	movw_pc	lr				@ CP#5
+	movw_pc	lr				@ CP#6
+#endif
+	movw_pc	lr				@ CP#7
+	movw_pc	lr				@ CP#8
+	movw_pc	lr				@ CP#9
+#ifdef CONFIG_VFP
+	W(b)	do_vfp				@ CP#10 (VFP)
+	W(b)	do_vfp				@ CP#11 (VFP)
+#else
+	movw_pc	lr				@ CP#10 (VFP)
+	movw_pc	lr				@ CP#11 (VFP)
+#endif
+	movw_pc	lr				@ CP#12
+	movw_pc	lr				@ CP#13
+	movw_pc	lr				@ CP#14 (Debug)
+	movw_pc	lr				@ CP#15 (Control)
+
+#ifdef NEED_CPU_ARCHITECTURE
+	.align	2
+.LCcpu_architecture:
+	.word	__cpu_architecture
+#endif
+
+#ifdef CONFIG_NEON
+	.align	6
+
+.LCneon_arm_opcodes:
+	.word	0xfe000000			@ mask
+	.word	0xf2000000			@ opcode
+
+	.word	0xff100000			@ mask
+	.word	0xf4000000			@ opcode
+
+	.word	0x00000000			@ mask
+	.word	0x00000000			@ opcode
+
+.LCneon_thumb_opcodes:
+	.word	0xef000000			@ mask
+	.word	0xef000000			@ opcode
+
+	.word	0xff100000			@ mask
+	.word	0xf9000000			@ opcode
+
+	.word	0x00000000			@ mask
+	.word	0x00000000			@ opcode
+#endif
+
+do_fpe:
+	enable_irq
+	ldr	r4, .LCfp
+	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
+	ldr	pc, [r4]			@ Call FP module USR entry point
+
+/*
+ * The FP module is called with these registers set:
+ *  r0  = instruction
+ *  r2  = PC+4
+ *  r9  = normal "successful" return address
+ *  r10 = FP workspace
+ *  lr  = unrecognised FP instruction return address
+ */
+
+	.pushsection .data
+ENTRY(fp_enter)
+	.word	no_fp
+	.popsection
+
+ENTRY(no_fp)
+	mov	pc, lr
+ENDPROC(no_fp)
+
+__und_usr_fault_32:
+	mov	r1, #4
+	b	1f
+__und_usr_fault_16:
+	mov	r1, #2
+1:	enable_irq
+	mov	r0, sp
+	adr	lr, BSYM(ret_from_exception)
+	b	__und_fault
+ENDPROC(__und_usr_fault_32)
+ENDPROC(__und_usr_fault_16)
+
+	.align	5
+__pabt_usr:
+	usr_entry
+	mov	r2, sp				@ regs
+	pabt_helper
+ UNWIND(.fnend		)
+	/* fall through */
+/*
+ * This is the return code to user mode for abort handlers
+ */
+ENTRY(ret_from_exception)
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
+	get_thread_info tsk
+	mov	why, #0
+	b	ret_to_user
+ UNWIND(.fnend		)
+ENDPROC(__pabt_usr)
+ENDPROC(ret_from_exception)
+
+/*
+ * Register switch for ARMv3 and ARMv4 processors
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
+	add	ip, r1, #TI_CPU_SAVE
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
+	ldr	r4, [r2, #TI_TP_VALUE]
+	ldr	r5, [r2, #TI_TP_VALUE + 4]
+#ifdef CONFIG_CPU_USE_DOMAINS
+	ldr	r6, [r2, #TI_CPU_DOMAIN]
+#endif
+	switch_tls r1, r4, r5, r3, r7
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	ldr	r7, [r2, #TI_TASK]
+	ldr	r8, =__stack_chk_guard
+	ldr	r7, [r7, #TSK_STACK_CANARY]
+#endif
+#ifdef CONFIG_CPU_USE_DOMAINS
+	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
+#endif
+	mov	r5, r0
+	add	r4, r2, #TI_CPU_SAVE
+	ldr	r0, =thread_notify_head
+	mov	r1, #THREAD_NOTIFY_SWITCH
+	bl	atomic_notifier_call_chain
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	str	r7, [r8]
+#endif
+ THUMB(	mov	ip, r4			   )
+	mov	r0, r5
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
+ UNWIND(.fnend		)
+ENDPROC(__switch_to)
+
+	__INIT
+
+/*
+ * User helpers.
+ *
+ * Each segment is 32-byte aligned and will be moved to the top of the high
+ * vector page.  New segments (if ever needed) must be added in front of
+ * existing ones.  This mechanism should be used only for things that are
+ * really small and justified, and not be abused freely.
+ *
+ * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
+ */
+ THUMB(	.arm	)
+
+	.macro	usr_ret, reg
+#ifdef CONFIG_ARM_THUMB
+	bx	\reg
+#else
+	mov	pc, \reg
+#endif
+	.endm
+
+	.align	5
+	.globl	__kuser_helper_start
+__kuser_helper_start:
+
+/*
+ * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
+ * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
+ */
+
+__kuser_cmpxchg64:				@ 0xffff0f60
+
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	stmfd	sp!, {r7, lr}
+	ldr	r7, 1f			@ it's 20 bits
+	swi	__ARM_NR_cmpxchg64
+	ldmfd	sp!, {r7, pc}
+1:	.word	__ARM_NR_cmpxchg64
+
+#elif defined(CONFIG_CPU_32v6K)
+
+	stmfd	sp!, {r4, r5, r6, r7}
+	ldrd	r4, r5, [r0]			@ load old val
+	ldrd	r6, r7, [r1]			@ load new val
+	smp_dmb	arm
+1:	ldrexd	r0, r1, [r2]			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+	strexdeq r3, r6, r7, [r2]		@ store newval if eq
+	teqeq	r3, #1				@ success?
+	beq	1b				@ if not, retry
+	smp_dmb	arm
+	rsbs	r0, r3, #0			@ set returned val and C flag
+	ldmfd	sp!, {r4, r5, r6, r7}
+	usr_ret	lr
+
+#elif !defined(CONFIG_SMP)
+
+#ifdef CONFIG_MMU
+
+	/*
+	 * The only thing that can break atomicity in this cmpxchg64
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle of
+	 * the critical sequence.  The same strategy as for cmpxchg is used.
+	 */
+	stmfd	sp!, {r4, r5, r6, lr}
+	ldmia	r0, {r4, r5}			@ load old val
+	ldmia	r1, {r6, lr}			@ load new val
+1:	ldmia	r2, {r0, r1}			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+2:	stmeqia	r2, {r6, lr}			@ store newval if eq
+	rsbs	r0, r3, #0			@ set return val and C flag
+	ldmfd	sp!, {r4, r5, r6, pc}
+
+	.text
+kuser_cmpxchg64_fixup:
+	@ Called from kuser_cmpxchg_fixup.
+	@ r4 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
+	subs	r8, r4, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
+#if __LINUX_ARM_ARCH__ < 6
+	bcc	kuser_cmpxchg32_fixup
+#endif
+	mov	pc, lr
+	.previous
+
+#else
+#warning "NPTL on non MMU needs fixing"
+	mov	r0, #-1
+	adds	r0, r0, #0
+	usr_ret	lr
+#endif
+
+#else
+#error "incoherent kernel configuration"
+#endif
+
+	/* pad to next slot */
+	.rept	(16 - (. - __kuser_cmpxchg64)/4)
+	.word	0
+	.endr
+
+	.align	5
+
+__kuser_memory_barrier:				@ 0xffff0fa0
+	smp_dmb	arm
+	usr_ret	lr
+
+	.align	5
+
+__kuser_cmpxchg:				@ 0xffff0fc0
+
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	stmfd	sp!, {r7, lr}
+	ldr	r7, 1f			@ it's 20 bits
+	swi	__ARM_NR_cmpxchg
+	ldmfd	sp!, {r7, pc}
+1:	.word	__ARM_NR_cmpxchg
+
+#elif __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_MMU
+
+	/*
+	 * The only thing that can break atomicity in this cmpxchg
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle
+	 * of the critical sequence.  To prevent this, code is added to
+	 * the IRQ and data abort exception handlers to set the pc back
+	 * to the beginning of the critical section if it is found to be
+	 * within that critical section (see kuser_cmpxchg_fixup).
+	 */
+1:	ldr	r3, [r2]			@ load current val
+	subs	r3, r3, r0			@ compare with oldval
+2:	streq	r1, [r2]			@ store newval if eq
+	rsbs	r0, r3, #0			@ set return val and C flag
+	usr_ret	lr
+
+	.text
+kuser_cmpxchg32_fixup:
+	@ Called from kuser_cmpxchg_check macro.
+	@ r4 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
+	subs	r8, r4, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
+	mov	pc, lr
+	.previous
+
+#else
+#warning "NPTL on non MMU needs fixing"
+	mov	r0, #-1
+	adds	r0, r0, #0
+	usr_ret	lr
+#endif
+
+#else
+
+	smp_dmb	arm
+1:	ldrex	r3, [r2]
+	subs	r3, r3, r0
+	strexeq	r3, r1, [r2]
+	teqeq	r3, #1
+	beq	1b
+	rsbs	r0, r3, #0
+	/* beware -- each __kuser slot must be 8 instructions max */
+	ALT_SMP(b	__kuser_memory_barrier)
+	ALT_UP(usr_ret	lr)
+
+#endif
+
+	.align	5
+
+__kuser_get_tls:				@ 0xffff0fe0
+	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
+	usr_ret	lr
+	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
+	.rep	4
+	.word	0			@ 0xffff0ff0 software TLS value, then
+	.endr				@ pad up to __kuser_helper_version
+
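+@ The version word is the number of 32-byte helper slots provided
+@ (hence the >> 5); user space reads it at 0xffff0ffc to discover
+@ which helpers are available.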
+__kuser_helper_version:				@ 0xffff0ffc
+	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
+
+	.globl	__kuser_helper_end
+__kuser_helper_end:
+
+ THUMB(	.thumb	)
+
+/*
+ * Vector stubs.
+ *
+ * This code is copied to 0xffff0200 so we can use branches in the
+ * vectors, rather than ldr's.  Note that this code must not
+ * exceed 0x300 bytes.
+ *
+ * Common stub entry macro:
+ *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
+ */
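+@ "correction" is the amount lr_<exception> is ahead of the address the
+@ handler should see: 4 for IRQ and prefetch aborts, 8 for data aborts,
+@ 0 for undefined instructions (adjusted later in the __und_* paths).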
+	.macro	vector_stub, name, mode, correction=0
+	.align	5
+
+vector_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@
+	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+	@ (parent CPSR)
+	@
+	stmia	sp, {r0, lr}		@ save r0, lr
+	mrs	lr, spsr
+	str	lr, [sp, #8]		@ save spsr
+
+	@
+	@ Prepare for SVC32 mode.  IRQs remain disabled.
+	@
+	mrs	r0, cpsr
+	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
+	msr	spsr_cxsf, r0
+
+	@
+	@ the branch table must immediately follow this code
+	@
+	and	lr, lr, #0x0f
+ THUMB(	adr	r0, 1f			)
+ THUMB(	ldr	lr, [r0, lr, lsl #2]	)
+	mov	r0, sp
+ ARM(	ldr	lr, [pc, lr, lsl #2]	)
+	movs	pc, lr			@ branch to handler in SVC mode
+ENDPROC(vector_\name)
+
+	.align	2
+	@ handler addresses follow this label
+1:
+	.endm
+
+	.globl	__stubs_start
+__stubs_start:
+/*
+ * Interrupt dispatcher
+ */
+	vector_stub	irq, IRQ_MODE, 4
+
+	.long	__irq_usr			@  0  (USR_26 / USR_32)
+	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
+	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
+	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
+	.long	__irq_invalid			@  4
+	.long	__irq_invalid			@  5
+	.long	__irq_invalid			@  6
+	.long	__irq_invalid			@  7
+	.long	__irq_invalid			@  8
+	.long	__irq_invalid			@  9
+	.long	__irq_invalid			@  a
+	.long	__irq_invalid			@  b
+	.long	__irq_invalid			@  c
+	.long	__irq_invalid			@  d
+	.long	__irq_invalid			@  e
+	.long	__irq_invalid			@  f
+
+/*
+ * Data abort dispatcher
+ * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
+ */
+	vector_stub	dabt, ABT_MODE, 8
+
+	.long	__dabt_usr			@  0  (USR_26 / USR_32)
+	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
+	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
+	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
+	.long	__dabt_invalid			@  4
+	.long	__dabt_invalid			@  5
+	.long	__dabt_invalid			@  6
+	.long	__dabt_invalid			@  7
+	.long	__dabt_invalid			@  8
+	.long	__dabt_invalid			@  9
+	.long	__dabt_invalid			@  a
+	.long	__dabt_invalid			@  b
+	.long	__dabt_invalid			@  c
+	.long	__dabt_invalid			@  d
+	.long	__dabt_invalid			@  e
+	.long	__dabt_invalid			@  f
+
+/*
+ * Prefetch abort dispatcher
+ * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
+ */
+	vector_stub	pabt, ABT_MODE, 4
+
+	.long	__pabt_usr			@  0 (USR_26 / USR_32)
+	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
+	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
+	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
+	.long	__pabt_invalid			@  4
+	.long	__pabt_invalid			@  5
+	.long	__pabt_invalid			@  6
+	.long	__pabt_invalid			@  7
+	.long	__pabt_invalid			@  8
+	.long	__pabt_invalid			@  9
+	.long	__pabt_invalid			@  a
+	.long	__pabt_invalid			@  b
+	.long	__pabt_invalid			@  c
+	.long	__pabt_invalid			@  d
+	.long	__pabt_invalid			@  e
+	.long	__pabt_invalid			@  f
+
+/*
+ * Undef instr entry dispatcher
+ * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ */
+	vector_stub	und, UND_MODE
+
+	.long	__und_usr			@  0 (USR_26 / USR_32)
+	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
+	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
+	.long	__und_svc			@  3 (SVC_26 / SVC_32)
+	.long	__und_invalid			@  4
+	.long	__und_invalid			@  5
+	.long	__und_invalid			@  6
+	.long	__und_invalid			@  7
+	.long	__und_invalid			@  8
+	.long	__und_invalid			@  9
+	.long	__und_invalid			@  a
+	.long	__und_invalid			@  b
+	.long	__und_invalid			@  c
+	.long	__und_invalid			@  d
+	.long	__und_invalid			@  e
+	.long	__und_invalid			@  f
+
+	.align	5
+
+/*=============================================================================
+ * Undefined FIQs
+ *-----------------------------------------------------------------------------
+ * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
+ * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
+ * Basically to switch modes, we *HAVE* to clobber one register...  brain
+ * damage alert!  I don't think that we can execute any code in here in any
+ * other mode than FIQ...  Ok you can switch to another mode, but you can't
+ * get out of that mode without clobbering one register.
+ */
+vector_fiq:
+	subs	pc, lr, #4
+
+/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+	b	vector_addrexcptn
+
+/*
+ * We group all the following data together to optimise
+ * for CPUs with separate I & D caches.
+ */
+	.align	5
+
+.LCvswi:
+	.word	vector_swi
+
+	.globl	__stubs_end
+__stubs_end:
+
+	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
+
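+@ The vectors and stubs below run from copies in the vector page:
+@ vectors at 0xffff0000, stubs at 0xffff0200.  The branches are
+@ PC-relative, so adding stubs_offset to each link-time target fixes
+@ the displacement up for the copied locations.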
+	.globl	__vectors_start
+__vectors_start:
+ ARM(	swi	SYS_ERROR0	)
+ THUMB(	svc	#0		)
+ THUMB(	nop			)
+	W(b)	vector_und + stubs_offset
+	W(ldr)	pc, .LCvswi + stubs_offset
+	W(b)	vector_pabt + stubs_offset
+	W(b)	vector_dabt + stubs_offset
+	W(b)	vector_addrexcptn + stubs_offset
+	W(b)	vector_irq + stubs_offset
+	W(b)	vector_fiq + stubs_offset
+
+	.globl	__vectors_end
+__vectors_end:
+
+	.data
+
+	.globl	cr_alignment
+	.globl	cr_no_alignment
+cr_alignment:
+	.space	4
+cr_no_alignment:
+	.space	4
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	.globl	handle_arch_irq
+handle_arch_irq:
+	.space	4
+#endif
+
+#ifdef CONFIG_IRQ_STACK
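+@ Dedicated IRQ stack: 1024 words (4 KB), switched to by the
+@ __irq_svc/__irq_usr paths above.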
+irq_handler_stack_bottom:
+	.rept	1024
+	.long	0
+	.endr
+irq_handler_stack_top:
+#endif
+
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/entry-common.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/entry-common.S
new file mode 100644
index 0000000..54ee265
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/entry-common.S
@@ -0,0 +1,656 @@
+/*
+ *  linux/arch/arm/kernel/entry-common.S
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/unistd.h>
+#include <asm/ftrace.h>
+#include <asm/unwind.h>
+
+#ifdef CONFIG_NEED_RET_TO_USER
+#include <mach/entry-macro.S>
+#else
+	.macro  arch_ret_to_user, tmp1, tmp2
+	.endm
+#endif
+
+#include "entry-header.S"
+
+
+	.align	5
+/*
+ * This is the fast syscall return path.  We do as little as
+ * possible here, and this includes saving r0 back into the SVC
+ * stack.
+ */
+ret_fast_syscall:
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
+	disable_irq				@ disable interrupts
+	ldr	r1, [tsk, #TI_FLAGS]
+	tst	r1, #_TIF_WORK_MASK
+	bne	fast_work_pending
+#if defined(CONFIG_IRQSOFF_TRACER)
+	asm_trace_hardirqs_on
+#endif
+
+	/* perform architecture specific actions before user return */
+	arch_ret_to_user r1, lr
+
+	restore_user_regs fast = 1, offset = S_OFF
+ UNWIND(.fnend		)
+
+/*
+ * Ok, we need to do extra processing, enter the slow path.
+ */
+fast_work_pending:
+	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
+work_pending:
+	tst	r1, #_TIF_NEED_RESCHED
+	bne	work_resched
+	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
+	beq	no_work_pending
+	mov	r0, sp				@ 'regs'
+	mov	r2, why				@ 'syscall'
+	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
+	movne	why, #0				@ prevent further restarts
+	bl	do_notify_resume
+	b	ret_slow_syscall		@ Check work again
+
+work_resched:
+	bl	schedule
+/*
+ * "slow" syscall return path.  "why" tells us if this was a real syscall.
+ */
+ENTRY(ret_to_user)
+ret_slow_syscall:
+	disable_irq				@ disable interrupts
+ENTRY(ret_to_user_from_irq)
+	ldr	r1, [tsk, #TI_FLAGS]
+	tst	r1, #_TIF_WORK_MASK
+	bne	work_pending
+no_work_pending:
+#if defined(CONFIG_IRQSOFF_TRACER)
+	asm_trace_hardirqs_on
+#endif
+	/* perform architecture specific actions before user return */
+	arch_ret_to_user r1, lr
+
+	restore_user_regs fast = 0, offset = 0
+ENDPROC(ret_to_user_from_irq)
+ENDPROC(ret_to_user)
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+	bl	schedule_tail
+	get_thread_info tsk
+	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
+	mov	why, #1
+	tst	r1, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
+	beq	ret_slow_syscall
+	mov	r1, sp
+	mov	r0, #1				@ trace exit [IP = 1]
+	bl	syscall_trace
+	b	ret_slow_syscall
+ENDPROC(ret_from_fork)
+
+	.equ NR_syscalls,0
+#define CALL(x) .equ NR_syscalls,NR_syscalls+1
+#include "calls.S"
+#undef CALL
+#define CALL(x) .long x
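+/*
+ * calls.S is included twice: the include above only counts the CALL()
+ * entries to compute NR_syscalls; with CALL() redefined to emit ".long x",
+ * the later include at sys_call_table generates the actual table.
+ */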
+
+#ifdef CONFIG_FUNCTION_TRACER
+/*
+ * When compiling with -pg, gcc inserts a call to the mcount routine at the
+ * start of every function.  In mcount, apart from the function's address (in
+ * lr), we need to get hold of the function's caller's address.
+ *
+ * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
+ *
+ *	bl	mcount
+ *
+ * These versions have the limitation that in order for the mcount routine to
+ * be able to determine the function's caller's address, an APCS-style frame
+ * pointer (which is set up with something like the code below) is required.
+ *
+ *	mov     ip, sp
+ *	push    {fp, ip, lr, pc}
+ *	sub     fp, ip, #4
+ *
+ * With EABI, these frame pointers are not available unless -mapcs-frame is
+ * specified, and if building as Thumb-2, not even then.
+ *
+ * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
+ * with call sites like:
+ *
+ *	push	{lr}
+ *	bl	__gnu_mcount_nc
+ *
+ * With these compilers, frame pointers are not necessary.
+ *
+ * mcount can be thought of as a function called in the middle of a subroutine
+ * call.  As such, it needs to be transparent for both the caller and the
+ * callee: the original lr needs to be restored when leaving mcount, and no
+ * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
+ * clobber the ip register.  This is OK because the ARM calling convention
+ * allows it to be clobbered in subroutines and doesn't use it to hold
+ * parameters.)
+ *
+ * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
+ * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
+ * arch/arm/kernel/ftrace.c).
+ */
+
+#ifndef CONFIG_OLD_MCOUNT
+#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
+#endif
+#endif
+
+.macro mcount_adjust_addr rd, rn
+	bic	\rd, \rn, #1		@ clear the Thumb bit if present
+	sub	\rd, \rd, #MCOUNT_INSN_SIZE
+.endm
+
+.macro __mcount suffix
+	mcount_enter
+	ldr	r0, =ftrace_trace_function
+	ldr	r2, [r0]
+	adr	r0, .Lftrace_stub
+	cmp	r0, r2
+	bne	1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ldr     r1, =ftrace_graph_return
+	ldr     r2, [r1]
+	cmp     r0, r2
+	bne     ftrace_graph_caller\suffix
+
+	ldr     r1, =ftrace_graph_entry
+	ldr     r2, [r1]
+	ldr     r0, =ftrace_graph_entry_stub
+	cmp     r0, r2
+	bne     ftrace_graph_caller\suffix
+#endif
+
+	mcount_exit
+
+1: 	mcount_get_lr	r1			@ lr of instrumented func
+	mcount_adjust_addr	r0, lr		@ instrumented function
+	adr	lr, BSYM(2f)
+	mov	pc, r2
+2:	mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+	mcount_enter
+
+	mcount_get_lr	r1			@ lr of instrumented func
+	mcount_adjust_addr	r0, lr		@ instrumented function
+
+	.globl ftrace_call\suffix
+ftrace_call\suffix:
+	bl	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
+	mov	r0, r0
+#endif
+
+	mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	@ called from __ftrace_caller, saved in mcount_enter
+	ldr	r1, [sp, #16]		@ instrumented routine (func)
+	mcount_adjust_addr	r1, r1
+#else
+	@ called from __mcount, untouched in lr
+	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
+#endif
+	mov	r2, fp			@ frame pointer
+	bl	prepare_ftrace_return
+	mcount_exit
+.endm
+
+#ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+	stmdb	sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+	ldr	\reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {r0-r3, pc}
+.endm
+
+ENTRY(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	stmdb	sp!, {lr}
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {pc}
+#else
+	__mcount _old
+#endif
+ENDPROC(mcount)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller_old)
+	__ftrace_caller _old
+ENDPROC(ftrace_caller_old)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+	__ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
+
+/*
+ * __gnu_mcount_nc
+ */
+
+.macro mcount_enter
+	stmdb	sp!, {r0-r3, lr}
+.endm
+
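+@ The call site is "push {lr}; bl __gnu_mcount_nc" and mcount_enter pushes
+@ {r0-r3, lr} (five words), so the instrumented function's original lr
+@ sits at [sp, #20].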
+.macro mcount_get_lr reg
+	ldr	\reg, [sp, #20]
+.endm
+
+.macro mcount_exit
+	ldmia	sp!, {r0-r3, ip, lr}
+	mov	pc, ip
+.endm
+
+ENTRY(__gnu_mcount_nc)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	mov	ip, lr
+	ldmia	sp!, {lr}
+	mov	pc, ip
+#else
+	__mcount
+#endif
+ENDPROC(__gnu_mcount_nc)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+	__ftrace_caller
+ENDPROC(ftrace_caller)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	__ftrace_graph_caller
+ENDPROC(ftrace_graph_caller)
+#endif
+
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl return_to_handler
+return_to_handler:
+	stmdb	sp!, {r0-r3}
+	mov	r0, fp			@ frame pointer
+	bl	ftrace_return_to_handler
+	mov	lr, r0			@ r0 has real ret addr
+	ldmia	sp!, {r0-r3}
+	mov	pc, lr
+#endif
+
+ENTRY(ftrace_stub)
+.Lftrace_stub:
+	mov	pc, lr
+ENDPROC(ftrace_stub)
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+/*=============================================================================
+ * SWI handler
+ *-----------------------------------------------------------------------------
+ */
+
+	/* If we're optimising for StrongARM the resulting code won't
+	   run on an ARM7 and we can save a couple of instructions.
+								--pb */
+#ifdef CONFIG_CPU_ARM710
+#define A710(code...) code
+.Larm710bug:
+	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
+	mov	r0, r0
+	add	sp, sp, #S_FRAME_SIZE
+	subs	pc, lr, #4
+#else
+#define A710(code...)
+#endif
+
+	.align	5
+ENTRY(vector_swi)
+	sub	sp, sp, #S_FRAME_SIZE
+	stmia	sp, {r0 - r12}			@ Calling r0 - r12
+ ARM(	add	r8, sp, #S_PC		)
+ ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
+ THUMB(	mov	r8, sp			)
+ THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
+	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
+	str	lr, [sp, #S_PC]			@ Save calling PC
+	str	r8, [sp, #S_PSR]		@ Save CPSR
+	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
+	zero_fp
+
+	/*
+	 * Get the system call number.
+	 */
+
+#if defined(CONFIG_OABI_COMPAT)
+
+	/*
+	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
+	 * value to determine if it is an EABI or an old ABI call.
+	 */
+#ifdef CONFIG_ARM_THUMB
+	tst	r8, #PSR_T_BIT
+	movne	r10, #0				@ no thumb OABI emulation
+	ldreq	r10, [lr, #-4]			@ get SWI instruction
+#else
+	ldr	r10, [lr, #-4]			@ get SWI instruction
+  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
+  A710(	teq	ip, #0x0f000000						)
+  A710(	bne	.Larm710bug						)
+#endif
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	rev	r10, r10			@ little endian instruction
+#endif
+
+#elif defined(CONFIG_AEABI)
+
+	/*
+	 * Pure EABI user space always puts the syscall number into scno (r7).
+	 */
+  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
+  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
+  A710(	teq	ip, #0x0f000000						)
+  A710(	bne	.Larm710bug						)
+
+#elif defined(CONFIG_ARM_THUMB)
+
+	/* Legacy ABI only, possibly thumb mode. */
+	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
+	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
+	ldreq	scno, [lr, #-4]
+
+#else
+
+	/* Legacy ABI only. */
+	ldr	scno, [lr, #-4]			@ get SWI instruction
+  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
+  A710(	teq	ip, #0x0f000000						)
+  A710(	bne	.Larm710bug						)
+
+#endif
+
+#ifdef CONFIG_ALIGNMENT_TRAP
+	ldr	ip, __cr_alignment
+	ldr	ip, [ip]
+	mcr	p15, 0, ip, c1, c0		@ update control register
+#endif
+	enable_irq
+
+	get_thread_info tsk
+	adr	tbl, sys_call_table		@ load syscall table pointer
+
+#if defined(CONFIG_OABI_COMPAT)
+	/*
+	 * If the swi argument is zero, this is an EABI call and we do nothing.
+	 *
+	 * If this is an old ABI call, get the syscall number into scno and
+	 * get the old ABI syscall table address.
+	 */
+	bics	r10, r10, #0xff000000
+	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
+	ldrne	tbl, =sys_oabi_call_table
+#elif !defined(CONFIG_AEABI)
+	bic	scno, scno, #0xff000000		@ mask off SWI op-code
+	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
+#endif
+
+	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
+	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
+
+#ifdef CONFIG_SECCOMP
+	tst	r10, #_TIF_SECCOMP
+	beq	1f
+	mov	r0, scno
+	bl	__secure_computing
+	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
+	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
+1:
+#endif
+
+	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
+	bne	__sys_trace
+
+	cmp	scno, #NR_syscalls		@ check upper syscall limit
+	adr	lr, BSYM(ret_fast_syscall)	@ return address
+	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
+
+	add	r1, sp, #S_OFF
+2:	mov	why, #0				@ no longer a real syscall
+	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
+	bcs	arm_syscall
+	b	sys_ni_syscall			@ not private func
+ENDPROC(vector_swi)
+
+	/*
+	 * This is the really slow path.  We're going to be doing
+	 * context switches, and waiting for our parent to respond.
+	 */
+__sys_trace:
+	mov	r2, scno
+	add	r1, sp, #S_OFF
+	mov	r0, #0				@ trace entry [IP = 0]
+	bl	syscall_trace
+
+	adr	lr, BSYM(__sys_trace_return)	@ return address
+	mov	scno, r0			@ syscall number (possibly new)
+	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
+	cmp	scno, #NR_syscalls		@ check upper syscall limit
+	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
+	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
+	b	2b
+
+__sys_trace_return:
+	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+	mov	r2, scno
+	mov	r1, sp
+	mov	r0, #1				@ trace exit [IP = 1]
+	bl	syscall_trace
+	b	ret_slow_syscall
+
+	.align	5
+#ifdef CONFIG_ALIGNMENT_TRAP
+	.type	__cr_alignment, #object
+__cr_alignment:
+	.word	cr_alignment
+#endif
+	.ltorg
+
+/*
+ * This is the syscall table declaration for native ABI syscalls.
+ * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
+ */
+#define ABI(native, compat) native
+#ifdef CONFIG_AEABI
+#define OBSOLETE(syscall) sys_ni_syscall
+#else
+#define OBSOLETE(syscall) syscall
+#endif
+
+	.type	sys_call_table, #object
+ENTRY(sys_call_table)
+#include "calls.S"
+#undef ABI
+#undef OBSOLETE
+
+/*============================================================================
+ * Special system call wrappers
+ */
+@ r0 = syscall number
+@ r8 = syscall table
+sys_syscall:
+		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
+		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
+		cmpne	scno, #NR_syscalls	@ check range
+		stmloia	sp, {r5, r6}		@ shuffle args
+		movlo	r0, r1
+		movlo	r1, r2
+		movlo	r2, r3
+		movlo	r3, r4
+		ldrlo	pc, [tbl, scno, lsl #2]
+		b	sys_ni_syscall
+ENDPROC(sys_syscall)
+
+sys_fork_wrapper:
+		add	r0, sp, #S_OFF
+		b	sys_fork
+ENDPROC(sys_fork_wrapper)
+
+sys_vfork_wrapper:
+		add	r0, sp, #S_OFF
+		b	sys_vfork
+ENDPROC(sys_vfork_wrapper)
+
+sys_execve_wrapper:
+		add	r3, sp, #S_OFF
+		b	sys_execve
+ENDPROC(sys_execve_wrapper)
+
+sys_clone_wrapper:
+		add	ip, sp, #S_OFF
+		str	ip, [sp, #4]
+		b	sys_clone
+ENDPROC(sys_clone_wrapper)
+
+sys_sigreturn_wrapper:
+		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
+		b	sys_sigreturn
+ENDPROC(sys_sigreturn_wrapper)
+
+sys_rt_sigreturn_wrapper:
+		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
+		b	sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn_wrapper)
+
+sys_sigaltstack_wrapper:
+		ldr	r2, [sp, #S_OFF + S_SP]
+		b	do_sigaltstack
+ENDPROC(sys_sigaltstack_wrapper)
+
+sys_statfs64_wrapper:
+		teq	r1, #88
+		moveq	r1, #84
+		b	sys_statfs64
+ENDPROC(sys_statfs64_wrapper)
+
+sys_fstatfs64_wrapper:
+		teq	r1, #88
+		moveq	r1, #84
+		b	sys_fstatfs64
+ENDPROC(sys_fstatfs64_wrapper)
+
+/*
+ * Note: off_4k (r5) is always in units of 4K.  If we can't do the
+ * requested offset we return -EINVAL.
+ */
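+@ With 4K pages (PAGE_SHIFT == 12) off_4k is already in page units and is
+@ stored straight to the stack as the sixth argument; with larger pages it
+@ must first be checked for alignment and scaled down by PAGE_SHIFT - 12.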
+sys_mmap2:
+#if PAGE_SHIFT > 12
+		tst	r5, #PGOFF_MASK
+		moveq	r5, r5, lsr #PAGE_SHIFT - 12
+		streq	r5, [sp, #4]
+		beq	sys_mmap_pgoff
+		mov	r0, #-EINVAL
+		mov	pc, lr
+#else
+		str	r5, [sp, #4]
+		b	sys_mmap_pgoff
+#endif
+ENDPROC(sys_mmap2)
+
+#ifdef CONFIG_OABI_COMPAT
+
+/*
+ * These are syscalls with argument register differences
+ */
+
+sys_oabi_pread64:
+		stmia	sp, {r3, r4}
+		b	sys_pread64
+ENDPROC(sys_oabi_pread64)
+
+sys_oabi_pwrite64:
+		stmia	sp, {r3, r4}
+		b	sys_pwrite64
+ENDPROC(sys_oabi_pwrite64)
+
+sys_oabi_truncate64:
+		mov	r3, r2
+		mov	r2, r1
+		b	sys_truncate64
+ENDPROC(sys_oabi_truncate64)
+
+sys_oabi_ftruncate64:
+		mov	r3, r2
+		mov	r2, r1
+		b	sys_ftruncate64
+ENDPROC(sys_oabi_ftruncate64)
+
+sys_oabi_readahead:
+		str	r3, [sp]
+		mov	r3, r2
+		mov	r2, r1
+		b	sys_readahead
+ENDPROC(sys_oabi_readahead)
+
+/*
+ * Let's declare a second syscall table for old ABI binaries
+ * using the compatibility syscall entries.
+ */
+#define ABI(native, compat) compat
+#define OBSOLETE(syscall) syscall
+
+	.type	sys_oabi_call_table, #object
+ENTRY(sys_oabi_call_table)
+#include "calls.S"
+#undef ABI
+#undef OBSOLETE
+
+#endif
+
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/entry-header.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/entry-header.S
new file mode 100644
index 0000000..a8dd573
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/entry-header.S
@@ -0,0 +1,194 @@
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/thread_info.h>
+
+@ Bad Abort numbers
+@ -----------------
+@
+#define BAD_PREFETCH	0
+#define BAD_DATA	1
+#define BAD_ADDREXCPTN	2
+#define BAD_IRQ		3
+#define BAD_UNDEFINSTR	4
+
+@
+@ Most of the stack format comes from struct pt_regs, but with
+@ the addition of 8 bytes for storing syscall args 5 and 6.
+@ This _must_ remain a multiple of 8 for EABI.
+@
+#define S_OFF		8
+
+/* 
+ * The SWI code relies on the fact that R0 is at the bottom of the stack
+ * (due to slow/fast restore user regs).
+ */
+#if S_R0 != 0
+#error "Please fix"
+#endif
+
+	.macro	zero_fp
+#ifdef CONFIG_FRAME_POINTER
+	mov	fp, #0
+#endif
+	.endm
+
+	.macro	alignment_trap, rtemp
+#ifdef CONFIG_ALIGNMENT_TRAP
+	ldr	\rtemp, .LCcralign
+	ldr	\rtemp, [\rtemp]
+	mcr	p15, 0, \rtemp, c1, c0
+#endif
+	.endm
+
+	@
+	@ Store/load the USER SP and LR registers by switching to the SYS
+	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
+	@ available. Should only be called from SVC mode.
+	@
+	.macro	store_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	str	sp, [\rd, #\offset]		@ save sp_usr
+	str	lr, [\rd, #\offset + 4]		@ save lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+	.macro	load_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	ldr	sp, [\rd, #\offset]		@ load sp_usr
+	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+#ifndef CONFIG_THUMB2_KERNEL
+	.macro	svc_exit, rpsr
+	msr	spsr_cxsf, \rpsr
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	sub	r0, sp, #4			@ uninhabited address
+	strex	r1, r2, [r0]			@ clear the exclusive monitor
+#endif
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#endif
+	.if	\fast
+	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	.else
+	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	.endif
+	mov	r0, r0				@ ARMv5T and earlier require a nop
+						@ after ldm {}^
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+#ifndef CONFIG_STACK_SIZE
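+	@ thread_info sits at the bottom of the 8K (2^13 byte) kernel stack,
+	@ so masking off the low 13 bits of sp yields the current thread_info.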
+	.macro	get_thread_info, rd
+	mov	\rd, sp, lsr #13
+	mov	\rd, \rd, lsl #13
+	.endm
+#else
+	.extern current_kernel_thread
+	.macro	get_thread_info, rd
+	ldr	\rd, =current_kernel_thread
+	ldr	\rd, [\rd]
+	.endm
+#endif
+	@
+	@ 32-bit wide "mov pc, reg"
+	@
+	.macro	movw_pc, reg
+	mov	pc, \reg
+	.endm
+#else	/* CONFIG_THUMB2_KERNEL */
+	.macro	svc_exit, rpsr
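+	@ Thumb-2 has no "ldmia sp, {r0 - pc}^", so stack a {lr, pc, psr}
+	@ frame just below the saved SP, restore r0-r12, pop lr, and let
+	@ RFE restore pc and psr atomically.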
+	ldr	lr, [sp, #S_SP]			@ top of the stack
+	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
+
+	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
+	ldmia	sp, {r0 - r12}
+	mov	sp, lr
+	ldr	lr, [sp], #4
+	rfeia	sp!
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	mov	r2, sp
+	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]	@ get pc
+	add	sp, sp, #\offset + S_SP
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+
+	.if	\fast
+	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
+	.else
+	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
+	.endif
+	add	sp, sp, #S_FRAME_SIZE - S_SP
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+#ifndef CONFIG_STACK_SIZE
+	.macro	get_thread_info, rd
+	mov	\rd, sp
+	lsr	\rd, \rd, #13
+	mov	\rd, \rd, lsl #13
+	.endm
+#else
+	.extern current_kernel_thread
+	.macro	get_thread_info, rd
+	ldr	\rd, =current_kernel_thread
+	ldr	\rd, [\rd]
+	.endm
+#endif
+
+	@
+	@ 32-bit wide "mov pc, reg"
+	@
+	.macro	movw_pc, reg
+	mov	pc, \reg
+	nop
+	.endm
+#endif	/* !CONFIG_THUMB2_KERNEL */
+
+/*
+ * These are the registers used in the syscall handler; in theory they
+ * allow up to 7 arguments to a function - r0 to r6.
+ *
+ * r7 is reserved for the system call number for thumb mode.
+ *
+ * Note that tbl == why is intentional.
+ *
+ * We must set at least "tsk" and "why" when calling ret_with_reschedule.
+ */
+scno	.req	r7		@ syscall number
+tbl	.req	r8		@ syscall table pointer
+why	.req	r8		@ Linux syscall (!= 0)
+tsk	.req	r9		@ current thread_info
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/etm.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/etm.c
new file mode 100644
index 0000000..c5fb6c9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/etm.c
@@ -0,0 +1,1076 @@
+/*
+ * linux/arch/arm/kernel/etm.c
+ *
+ * Driver for ARM's Embedded Trace Macrocell and Embedded Trace Buffer.
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ * Alexander Shishkin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/amba/bus.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <asm/hardware/coresight.h>
+#include <asm/sections.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shishkin");
+
+/*
+ * ETM tracer state
+ */
+struct tracectx {
+	unsigned int	etb_bufsz;
+	void __iomem	*etb_regs;
+	void __iomem	**etm_regs;
+	int		etm_regs_count;
+	unsigned long	flags;
+	int		ncmppairs;
+	int		etm_portsz;
+	int		etm_contextid_size;
+	u32		etb_fc;
+	unsigned long	range_start;
+	unsigned long	range_end;
+	unsigned long	data_range_start;
+	unsigned long	data_range_end;
+	bool		dump_initial_etb;
+	struct device	*dev;
+	struct clk	*emu_clk;
+	struct mutex	mutex;
+};
+
+static struct tracectx tracer = {
+	.range_start = (unsigned long)_stext,
+	.range_end = (unsigned long)_etext,
+};
+
+static inline bool trace_isrunning(struct tracectx *t)
+{
+	return !!(t->flags & TRACER_RUNNING);
+}
+
+static int etm_setup_address_range(struct tracectx *t, int id, int n,
+		unsigned long start, unsigned long end, int exclude, int data)
+{
+	u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
+		    ETMAAT_NOVALCMP;
+
+	if (n < 1 || n > t->ncmppairs)
+		return -EINVAL;
+
+	/* comparators and ranges are numbered starting with 1 as opposed
+	 * to bits in a word */
+	n--;
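+	/* address range n is programmed on comparators 2n and 2n+1 */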
+
+	if (data)
+		flags |= ETMAAT_DLOADSTORE;
+	else
+		flags |= ETMAAT_IEXEC;
+
+	/* first comparator for the range */
+	etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
+	etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
+
+	/* second comparator is right next to it */
+	etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+	etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
+
+	if (data) {
+		flags = exclude ? ETMVDC3_EXCLONLY : 0;
+		if (exclude)
+			n += 8;
+		etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
+	} else {
+		flags = exclude ? ETMTE_INCLEXCL : 0;
+		etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
+	}
+
+	return 0;
+}
+
+static int trace_start_etm(struct tracectx *t, int id)
+{
+	u32 v;
+	unsigned long timeout = TRACER_TIMEOUT;
+
+	v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
+	v |= ETMCTRL_CONTEXTIDSIZE(t->etm_contextid_size);
+
+	if (t->flags & TRACER_CYCLE_ACC)
+		v |= ETMCTRL_CYCLEACCURATE;
+
+	if (t->flags & TRACER_BRANCHOUTPUT)
+		v |= ETMCTRL_BRANCH_OUTPUT;
+
+	if (t->flags & TRACER_TRACE_DATA)
+		v |= ETMCTRL_DATA_DO_ADDR;
+
+	if (t->flags & TRACER_TIMESTAMP)
+		v |= ETMCTRL_TIMESTAMP_EN;
+
+	if (t->flags & TRACER_RETURN_STACK)
+		v |= ETMCTRL_RETURN_STACK_EN;
+
+	etm_unlock(t, id);
+
+	etm_writel(t, id, v, ETMR_CTRL);
+
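+	/* the ETM accepts configuration writes only while the progbit is set */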
+	while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+		;
+	if (!timeout) {
+		dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
+		etm_lock(t, id);
+		return -EFAULT;
+	}
+
+	if (t->range_start || t->range_end)
+		etm_setup_address_range(t, id, 1,
+					t->range_start, t->range_end, 0, 0);
+	else
+		etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
+
+	etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
+	etm_writel(t, id, 0, ETMR_TRACESSCTRL);
+	etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
+
+	etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
+	etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
+
+	if (t->data_range_start || t->data_range_end)
+		etm_setup_address_range(t, id, 2, t->data_range_start,
+					t->data_range_end, 0, 1);
+	else
+		etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
+
+	etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
+
+	v &= ~ETMCTRL_PROGRAM;
+	v |= ETMCTRL_PORTSEL;
+
+	etm_writel(t, id, v, ETMR_CTRL);
+
+	timeout = TRACER_TIMEOUT;
+	while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+		;
+	if (!timeout) {
+		dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
+		etm_lock(t, id);
+		return -EFAULT;
+	}
+
+	etm_lock(t, id);
+	return 0;
+}
+
+static int trace_start(struct tracectx *t)
+{
+	int ret;
+	int id;
+	u32 etb_fc = t->etb_fc;
+
+	etb_unlock(t);
+
+	t->dump_initial_etb = false;
+	etb_writel(t, 0, ETBR_WRITEADDR);
+	etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+	etb_writel(t, 1, ETBR_CTRL);
+
+	etb_lock(t);
+
+	/* configure etm(s) */
+	for (id = 0; id < t->etm_regs_count; id++) {
+		ret = trace_start_etm(t, id);
+		if (ret)
+			return ret;
+	}
+
+	t->flags |= TRACER_RUNNING;
+
+	return 0;
+}
+
+static int trace_stop_etm(struct tracectx *t, int id)
+{
+	unsigned long timeout = TRACER_TIMEOUT;
+
+	etm_unlock(t, id);
+
+	etm_writel(t, id, 0x440, ETMR_CTRL);
+	while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+		;
+	if (!timeout) {
+		dev_err(t->dev,
+			"etm%d: Waiting for progbit to assert timed out\n",
+			id);
+		etm_lock(t, id);
+		return -EFAULT;
+	}
+
+	etm_lock(t, id);
+	return 0;
+}
+
+static int trace_power_down_etm(struct tracectx *t, int id)
+{
+	unsigned long timeout = TRACER_TIMEOUT;
+	etm_unlock(t, id);
+	while (!(etm_readl(t, id, ETMR_STATUS) & ETMST_PROGBIT) && --timeout)
+		;
+	if (!timeout) {
+		dev_err(t->dev, "etm%d: Waiting for status progbit to assert timed out\n",
+			id);
+		etm_lock(t, id);
+		return -EFAULT;
+	}
+
+	etm_writel(t, id, 0x441, ETMR_CTRL);
+
+	etm_lock(t, id);
+	return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+	int id;
+	unsigned long timeout = TRACER_TIMEOUT;
+	u32 etb_fc = t->etb_fc;
+
+	for (id = 0; id < t->etm_regs_count; id++)
+		trace_stop_etm(t, id);
+
+	for (id = 0; id < t->etm_regs_count; id++)
+		trace_power_down_etm(t, id);
+
+	etb_unlock(t);
+	if (etb_fc) {
+		etb_fc |= ETBFF_STOPFL;
+		etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
+	}
+	etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+
+	timeout = TRACER_TIMEOUT;
+	while (etb_readl(t, ETBR_FORMATTERCTRL) &
+			ETBFF_MANUAL_FLUSH && --timeout)
+		;
+	if (!timeout) {
+		dev_dbg(t->dev, "Waiting for formatter flush to commence "
+				"timed out\n");
+		etb_lock(t);
+		return -EFAULT;
+	}
+
+	etb_writel(t, 0, ETBR_CTRL);
+
+	etb_lock(t);
+
+	t->flags &= ~TRACER_RUNNING;
+
+	return 0;
+}
+
+static int etb_getdatalen(struct tracectx *t)
+{
+	u32 v;
+	int wp;
+
+	v = etb_readl(t, ETBR_STATUS);
+
+	if (v & 1)
+		return t->etb_bufsz;
+
+	wp = etb_readl(t, ETBR_WRITEADDR);
+	return wp;
+}
+
+/* sysrq+v will always stop the running trace and leave it at that */
+static void etm_dump(void)
+{
+	struct tracectx *t = &tracer;
+	u32 first = 0;
+	int length;
+
+	if (!t->etb_regs) {
+		printk(KERN_INFO "No tracing hardware found\n");
+		return;
+	}
+
+	if (trace_isrunning(t))
+		trace_stop(t);
+
+	etb_unlock(t);
+
+	length = etb_getdatalen(t);
+
+	if (length == t->etb_bufsz)
+		first = etb_readl(t, ETBR_WRITEADDR);
+
+	etb_writel(t, first, ETBR_READADDR);
+
+	printk(KERN_INFO "Trace buffer contents length: %d\n", length);
+	printk(KERN_INFO "--- ETB buffer begin ---\n");
+	for (; length; length--)
+		printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
+	printk(KERN_INFO "\n--- ETB buffer end ---\n");
+
+	etb_lock(t);
+}
+
+static void sysrq_etm_dump(int key)
+{
+	if (!mutex_trylock(&tracer.mutex)) {
+		printk(KERN_INFO "Tracing hardware busy\n");
+		return;
+	}
+	dev_dbg(tracer.dev, "Dumping ETB buffer\n");
+	etm_dump();
+	mutex_unlock(&tracer.mutex);
+}
+
+static struct sysrq_key_op sysrq_etm_op = {
+	.handler = sysrq_etm_dump,
+	.help_msg = "ETM buffer dump",
+	.action_msg = "etm",
+};
+
+static int etb_open(struct inode *inode, struct file *file)
+{
+	if (!tracer.etb_regs)
+		return -ENODEV;
+
+	file->private_data = &tracer;
+
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t etb_read(struct file *file, char __user *data,
+		size_t len, loff_t *ppos)
+{
+	int total, i;
+	long length;
+	struct tracectx *t = file->private_data;
+	u32 first = 0;
+	u32 *buf;
+	int wpos;
+	int skip;
+	long wlength;
+	loff_t pos = *ppos;
+
+	mutex_lock(&t->mutex);
+
+	if (trace_isrunning(t)) {
+		length = 0;
+		goto out;
+	}
+
+	etb_unlock(t);
+
+	total = etb_getdatalen(t);
+	if (total == 0 && t->dump_initial_etb)
+		total = t->etb_bufsz;
+	if (total == t->etb_bufsz)
+		first = etb_readl(t, ETBR_WRITEADDR);
+
+	if (pos > total * 4) {
+		skip = 0;
+		wpos = total;
+	} else {
+		skip = (int)pos % 4;
+		wpos = (int)pos / 4;
+	}
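+	/* e.g. pos == 6 resumes 2 bytes (skip) into word 1 (wpos) of the buffer */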
+	total -= wpos;
+	first = (first + wpos) % t->etb_bufsz;
+
+	etb_writel(t, first, ETBR_READADDR);
+
+	wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
+	length = min(total * 4 - skip, (int)len);
+	buf = vmalloc(wlength * 4);
+
+	dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
+		length, pos, wlength, first);
+	dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
+	dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
+	for (i = 0; i < wlength; i++)
+		buf[i] = etb_readl(t, ETBR_READMEM);
+
+	etb_lock(t);
+
+	length -= copy_to_user(data, (u8 *)buf + skip, length);
+	vfree(buf);
+	*ppos = pos + length;
+
+out:
+	mutex_unlock(&t->mutex);
+
+	return length;
+}
+
+static int etb_release(struct inode *inode, struct file *file)
+{
+	/* there's nothing to do here, actually */
+	return 0;
+}
+
+static const struct file_operations etb_fops = {
+	.owner = THIS_MODULE,
+	.read = etb_read,
+	.open = etb_open,
+	.release = etb_release,
+	.llseek = no_llseek,
+};
+
+static struct miscdevice etb_miscdev = {
+	.name = "tracebuf",
+	.minor = 0,
+	.fops = &etb_fops,
+};
+
+static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id)
+{
+	struct tracectx *t = &tracer;
+	int ret = 0;
+
+	ret = amba_request_regions(dev, NULL);
+	if (ret)
+		goto out;
+
+	mutex_lock(&t->mutex);
+	t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
+	if (!t->etb_regs) {
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	t->dev = &dev->dev;
+	t->dump_initial_etb = true;
+	amba_set_drvdata(dev, t);
+
+	etb_unlock(t);
+	t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
+	dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
+
+	/* make sure trace capture is disabled */
+	etb_writel(t, 0, ETBR_CTRL);
+	etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
+	etb_lock(t);
+	mutex_unlock(&t->mutex);
+
+	etb_miscdev.parent = &dev->dev;
+
+	ret = misc_register(&etb_miscdev);
+	if (ret)
+		goto out_unmap;
+
+	/* Get optional clock. Currently used to select clock source on omap3 */
+	t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+	if (IS_ERR(t->emu_clk))
+		dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+	else
+		clk_enable(t->emu_clk);
+
+	dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
+
+out:
+	return ret;
+
+out_unmap:
+	mutex_lock(&t->mutex);
+	amba_set_drvdata(dev, NULL);
+	iounmap(t->etb_regs);
+	t->etb_regs = NULL;
+
+out_release:
+	mutex_unlock(&t->mutex);
+	amba_release_regions(dev);
+
+	return ret;
+}
+
+static int etb_remove(struct amba_device *dev)
+{
+	struct tracectx *t = amba_get_drvdata(dev);
+
+	amba_set_drvdata(dev, NULL);
+
+	iounmap(t->etb_regs);
+	t->etb_regs = NULL;
+
+	if (!IS_ERR(t->emu_clk)) {
+		clk_disable(t->emu_clk);
+		clk_put(t->emu_clk);
+	}
+
+	amba_release_regions(dev);
+
+	return 0;
+}
+
+static struct amba_id etb_ids[] = {
+	{
+		.id	= 0x0003b907,
+		.mask	= 0x0007ffff,
+	},
+	{ 0, 0 },
+};
+
+static struct amba_driver etb_driver = {
+	.drv		= {
+		.name	= "etb",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= etb_probe,
+	.remove		= etb_remove,
+	.id_table	= etb_ids,
+};
+
+/* use a sysfs file "trace_running" to start/stop tracing */
+static ssize_t trace_running_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%x\n", trace_isrunning(&tracer));
+}
+
+static ssize_t trace_running_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned int value;
+	int ret;
+
+	if (sscanf(buf, "%u", &value) != 1)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	if (!tracer.etb_regs)
+		ret = -ENODEV;
+	else
+		ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+	mutex_unlock(&tracer.mutex);
+
+	return ret ? : n;
+}
+
+static struct kobj_attribute trace_running_attr =
+	__ATTR(trace_running, 0644, trace_running_show, trace_running_store);
+
+static ssize_t trace_info_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
+	int datalen;
+	int id;
+	int ret;
+
+	mutex_lock(&tracer.mutex);
+	if (tracer.etb_regs) {
+		etb_unlock(&tracer);
+		datalen = etb_getdatalen(&tracer);
+		etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+		etb_ra = etb_readl(&tracer, ETBR_READADDR);
+		etb_st = etb_readl(&tracer, ETBR_STATUS);
+		etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+		etb_lock(&tracer);
+	} else {
+		etb_wa = etb_ra = etb_st = etb_fc = ~0;
+		datalen = -1;
+	}
+
+	ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+			"ETBR_WRITEADDR:\t%08x\n"
+			"ETBR_READADDR:\t%08x\n"
+			"ETBR_STATUS:\t%08x\n"
+			"ETBR_FORMATTERCTRL:\t%08x\n",
+			datalen,
+			tracer.ncmppairs,
+			etb_wa,
+			etb_ra,
+			etb_st,
+			etb_fc
+			);
+
+	for (id = 0; id < tracer.etm_regs_count; id++) {
+		etm_unlock(&tracer, id);
+		etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
+		etm_st = etm_readl(&tracer, id, ETMR_STATUS);
+		etm_lock(&tracer, id);
+		ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
+			"ETMR_STATUS:\t%08x\n",
+			etm_ctrl,
+			etm_st
+			);
+	}
+	mutex_unlock(&tracer.mutex);
+
+	return ret;
+}
+
+static struct kobj_attribute trace_info_attr =
+	__ATTR(trace_info, 0444, trace_info_show, NULL);
+
+static ssize_t trace_mode_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%d %d\n",
+			!!(tracer.flags & TRACER_CYCLE_ACC),
+			tracer.etm_portsz);
+}
+
+static ssize_t trace_mode_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned int cycacc, portsz;
+
+	if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	if (cycacc)
+		tracer.flags |= TRACER_CYCLE_ACC;
+	else
+		tracer.flags &= ~TRACER_CYCLE_ACC;
+
+	tracer.etm_portsz = portsz & 0x0f;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_mode_attr =
+	__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
+
+static ssize_t trace_contextid_size_show(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 char *buf)
+{
+	/* 0: No context id tracing, 1: One byte, 2: Two bytes, 3: Four bytes */
+	return sprintf(buf, "%d\n", (1 << tracer.etm_contextid_size) >> 1);
+}
+
+static ssize_t trace_contextid_size_store(struct kobject *kobj,
+					  struct kobj_attribute *attr,
+					  const char *buf, size_t n)
+{
+	unsigned int contextid_size;
+
+	if (sscanf(buf, "%u", &contextid_size) != 1)
+		return -EINVAL;
+
+	if (contextid_size == 3 || contextid_size > 4)
+		return -EINVAL;
+
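+	/* fls() maps the byte counts 0/1/2/4 onto the field values 0/1/2/3 */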
+	mutex_lock(&tracer.mutex);
+	tracer.etm_contextid_size = fls(contextid_size);
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_contextid_size_attr =
+	__ATTR(trace_contextid_size, 0644,
+		trace_contextid_size_show, trace_contextid_size_store);
+
+static ssize_t trace_branch_output_show(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					char *buf)
+{
+	return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
+}
+
+static ssize_t trace_branch_output_store(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 const char *buf, size_t n)
+{
+	unsigned int branch_output;
+
+	if (sscanf(buf, "%u", &branch_output) != 1)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	if (branch_output) {
+		tracer.flags |= TRACER_BRANCHOUTPUT;
+		/* Branch broadcasting is incompatible with the return stack */
+		tracer.flags &= ~TRACER_RETURN_STACK;
+	} else {
+		tracer.flags &= ~TRACER_BRANCHOUTPUT;
+	}
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_branch_output_attr =
+	__ATTR(trace_branch_output, 0644,
+		trace_branch_output_show, trace_branch_output_store);
+
+static ssize_t trace_return_stack_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
+}
+
+static ssize_t trace_return_stack_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned int return_stack;
+
+	if (sscanf(buf, "%u", &return_stack) != 1)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	if (return_stack) {
+		tracer.flags |= TRACER_RETURN_STACK;
+		/* Return stack is incompatible with branch broadcasting */
+		tracer.flags &= ~TRACER_BRANCHOUTPUT;
+	} else {
+		tracer.flags &= ~TRACER_RETURN_STACK;
+	}
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_return_stack_attr =
+	__ATTR(trace_return_stack, 0644,
+		trace_return_stack_show, trace_return_stack_store);
+
+static ssize_t trace_timestamp_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
+}
+
+static ssize_t trace_timestamp_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned int timestamp;
+
+	if (sscanf(buf, "%u", &timestamp) != 1)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	if (timestamp)
+		tracer.flags |= TRACER_TIMESTAMP;
+	else
+		tracer.flags &= ~TRACER_TIMESTAMP;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+static struct kobj_attribute trace_timestamp_attr =
+	__ATTR(trace_timestamp, 0644,
+		trace_timestamp_show, trace_timestamp_store);
+
+static ssize_t trace_range_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%08lx %08lx\n",
+			tracer.range_start, tracer.range_end);
+}
+
+static ssize_t trace_range_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned long range_start, range_end;
+
+	if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	tracer.range_start = range_start;
+	tracer.range_end = range_end;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+
+static struct kobj_attribute trace_range_attr =
+	__ATTR(trace_range, 0644, trace_range_show, trace_range_store);
+
+static ssize_t trace_data_range_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	unsigned long range_start;
+	u64 range_end;
+	mutex_lock(&tracer.mutex);
+	range_start = tracer.data_range_start;
+	range_end = tracer.data_range_end;
+	if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
+		range_end = 0x100000000ULL;
+	mutex_unlock(&tracer.mutex);
+	return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
+}
+
+static ssize_t trace_data_range_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t n)
+{
+	unsigned long range_start;
+	u64 range_end;
+
+	if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
+		return -EINVAL;
+
+	mutex_lock(&tracer.mutex);
+	tracer.data_range_start = range_start;
+	tracer.data_range_end = (unsigned long)range_end;
+	if (range_end)
+		tracer.flags |= TRACER_TRACE_DATA;
+	else
+		tracer.flags &= ~TRACER_TRACE_DATA;
+	mutex_unlock(&tracer.mutex);
+
+	return n;
+}
+
+
+static struct kobj_attribute trace_data_range_attr =
+	__ATTR(trace_data_range, 0644,
+		trace_data_range_show, trace_data_range_store);
+
+static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
+{
+	struct tracectx *t = &tracer;
+	int ret = 0;
+	void __iomem **new_regs;
+	int new_count;
+	u32 etmccr;
+	u32 etmidr;
+	u32 etmccer = 0;
+	u8 etm_version = 0;
+
+	mutex_lock(&t->mutex);
+	new_count = t->etm_regs_count + 1;
+	new_regs = krealloc(t->etm_regs,
+				sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
+
+	if (!new_regs) {
+		dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	t->etm_regs = new_regs;
+
+	ret = amba_request_regions(dev, NULL);
+	if (ret)
+		goto out;
+
+	t->etm_regs[t->etm_regs_count] =
+		ioremap_nocache(dev->res.start, resource_size(&dev->res));
+	if (!t->etm_regs[t->etm_regs_count]) {
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
+
+	t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA | TRACER_BRANCHOUTPUT;
+	t->etm_portsz = 1;
+	t->etm_contextid_size = 3;
+
+	etm_unlock(t, t->etm_regs_count);
+	(void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
+	/* dummy first read */
+	(void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
+
+	etmccr = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE);
+	t->ncmppairs = etmccr & 0xf;
+	if (etmccr & ETMCCR_ETMIDR_PRESENT) {
+		etmidr = etm_readl(t, t->etm_regs_count, ETMR_ID);
+		etm_version = ETMIDR_VERSION(etmidr);
+		if (etm_version >= ETMIDR_VERSION_3_1)
+			etmccer = etm_readl(t, t->etm_regs_count, ETMR_CCE);
+	}
+	etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
+	etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
+	etm_lock(t, t->etm_regs_count);
+
+	ret = sysfs_create_file(&dev->dev.kobj,
+			&trace_running_attr.attr);
+	if (ret)
+		goto out_unmap;
+
+	/* failing to create either of these two files is not fatal */
+	ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n");
+
+	ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
+
+	ret = sysfs_create_file(&dev->dev.kobj,
+				&trace_contextid_size_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev,
+			"Failed to create trace_contextid_size in sysfs\n");
+
+	ret = sysfs_create_file(&dev->dev.kobj,
+				&trace_branch_output_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev,
+			"Failed to create trace_branch_output in sysfs\n");
+
+	if (etmccer & ETMCCER_RETURN_STACK_IMPLEMENTED) {
+		ret = sysfs_create_file(&dev->dev.kobj,
+					&trace_return_stack_attr.attr);
+		if (ret)
+			dev_dbg(&dev->dev,
+			      "Failed to create trace_return_stack in sysfs\n");
+	}
+
+	if (etmccer & ETMCCER_TIMESTAMPING_IMPLEMENTED) {
+		ret = sysfs_create_file(&dev->dev.kobj,
+					&trace_timestamp_attr.attr);
+		if (ret)
+			dev_dbg(&dev->dev,
+				"Failed to create trace_timestamp in sysfs\n");
+	}
+
+	ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
+	if (ret)
+		dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
+
+	if (etm_version < ETMIDR_VERSION_PFT_1_0) {
+		ret = sysfs_create_file(&dev->dev.kobj,
+					&trace_data_range_attr.attr);
+		if (ret)
+			dev_dbg(&dev->dev,
+				"Failed to create trace_data_range in sysfs\n");
+	} else {
+		tracer.flags &= ~TRACER_TRACE_DATA;
+	}
+
+	dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
+
+	/* Enable formatter if there are multiple trace sources */
+	if (new_count > 1)
+		t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
+
+	t->etm_regs_count = new_count;
+
+out:
+	mutex_unlock(&t->mutex);
+	return ret;
+
+out_unmap:
+	amba_set_drvdata(dev, NULL);
+	iounmap(t->etm_regs[t->etm_regs_count]);
+
+out_release:
+	amba_release_regions(dev);
+
+	mutex_unlock(&t->mutex);
+	return ret;
+}
+
+static int etm_remove(struct amba_device *dev)
+{
+	int i;
+	struct tracectx *t = &tracer;
+	void __iomem	*etm_regs = amba_get_drvdata(dev);
+
+	sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
+	sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
+
+	amba_set_drvdata(dev, NULL);
+
+	mutex_lock(&t->mutex);
+	for (i = 0; i < t->etm_regs_count; i++)
+		if (t->etm_regs[i] == etm_regs)
+			break;
+	for (; i < t->etm_regs_count - 1; i++)
+		t->etm_regs[i] = t->etm_regs[i + 1];
+	t->etm_regs_count--;
+	if (!t->etm_regs_count) {
+		kfree(t->etm_regs);
+		t->etm_regs = NULL;
+	}
+	mutex_unlock(&t->mutex);
+
+	iounmap(etm_regs);
+	amba_release_regions(dev);
+
+	return 0;
+}
+
+static struct amba_id etm_ids[] = {
+	{
+		.id	= 0x0003b921,
+		.mask	= 0x0007ffff,
+	},
+	{
+		.id	= 0x0003b950,
+		.mask	= 0x0007ffff,
+	},
+	{ 0, 0 },
+};
+
+static struct amba_driver etm_driver = {
+	.drv		= {
+		.name   = "etm",
+		.owner  = THIS_MODULE,
+	},
+	.probe		= etm_probe,
+	.remove		= etm_remove,
+	.id_table	= etm_ids,
+};
+
+static int __init etm_init(void)
+{
+	int retval;
+
+	mutex_init(&tracer.mutex);
+
+	retval = amba_driver_register(&etb_driver);
+	if (retval) {
+		printk(KERN_ERR "Failed to register etb\n");
+		return retval;
+	}
+
+	retval = amba_driver_register(&etm_driver);
+	if (retval) {
+		amba_driver_unregister(&etb_driver);
+		printk(KERN_ERR "Failed to probe etm\n");
+		return retval;
+	}
+
+	/* not being able to install this handler is not fatal */
+	(void)register_sysrq_key('v', &sysrq_etm_op);
+
+	return 0;
+}
+
+device_initcall(etm_init);
+
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/fiq.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/fiq.c
new file mode 100644
index 0000000..c32f845
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/fiq.c
@@ -0,0 +1,146 @@
+/*
+ *  linux/arch/arm/kernel/fiq.c
+ *
+ *  Copyright (C) 1998 Russell King
+ *  Copyright (C) 1998, 1999 Phil Blundell
+ *
+ *  FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
+ *
+ *  FIQ support re-written by Russell King to be more generic
+ *
+ * We now properly support a method by which the FIQ handlers can
+ * be stacked onto the vector.  We still do not support sharing
+ * the FIQ vector itself.
+ *
+ * Operation is as follows:
+ *  1. Owner A claims FIQ:
+ *     - default_fiq relinquishes control.
+ *  2. Owner A:
+ *     - inserts code.
+ *     - sets any registers,
+ *     - enables FIQ.
+ *  3. Owner B claims FIQ:
+ *     - if owner A has a relinquish function.
+ *       - disable FIQs.
+ *       - saves any registers.
+ *       - returns zero.
+ *  4. Owner B:
+ *     - inserts code.
+ *     - sets any registers,
+ *     - enables FIQ.
+ *  5. Owner B releases FIQ:
+ *     - Owner A is asked to reacquire FIQ:
+ *	 - inserts code.
+ *	 - restores saved registers.
+ *	 - enables FIQ.
+ *  6. Goto 3
+ */
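+/*
+ * A minimal owner, sketched with hypothetical handler code and IRQ names:
+ *
+ *	static struct fiq_handler h = { .name = "mydev" };
+ *
+ *	if (!claim_fiq(&h)) {
+ *		struct pt_regs regs = { };
+ *		set_fiq_handler(my_fiq_start, my_fiq_end - my_fiq_start);
+ *		set_fiq_regs(&regs);
+ *		enable_fiq(MYDEV_FIQ);
+ *	}
+ */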
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/fiq.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+
+static unsigned long no_fiq_insn;
+
+/* Default reacquire function
+ * - we always relinquish FIQ control
+ * - we always reacquire FIQ control
+ */
+static int fiq_def_op(void *ref, int relinquish)
+{
+	if (!relinquish)
+		set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn));
+
+	return 0;
+}
+
+static struct fiq_handler default_owner = {
+	.name	= "default",
+	.fiq_op = fiq_def_op,
+};
+
+static struct fiq_handler *current_fiq = &default_owner;
+
+int show_fiq_list(struct seq_file *p, int prec)
+{
+	if (current_fiq != &default_owner)
+		seq_printf(p, "%*s:              %s\n", prec, "FIQ",
+			current_fiq->name);
+
+	return 0;
+}
+
+void set_fiq_handler(void *start, unsigned int length)
+{
+#if defined(CONFIG_CPU_USE_DOMAINS)
+	memcpy((void *)0xffff001c, start, length);
+#else
+	memcpy(vectors_page + 0x1c, start, length);
+#endif
+	flush_icache_range(0xffff001c, 0xffff001c + length);
+	if (!vectors_high())
+		flush_icache_range(0x1c, 0x1c + length);
+}
+
+int claim_fiq(struct fiq_handler *f)
+{
+	int ret = 0;
+
+	if (current_fiq) {
+		ret = -EBUSY;
+
+		if (current_fiq->fiq_op != NULL)
+			ret = current_fiq->fiq_op(current_fiq->dev_id, 1);
+	}
+
+	if (!ret) {
+		f->next = current_fiq;
+		current_fiq = f;
+	}
+
+	return ret;
+}
+
+void release_fiq(struct fiq_handler *f)
+{
+	if (current_fiq != f) {
+		printk(KERN_ERR "%s FIQ trying to release %s FIQ\n",
+		       f->name, current_fiq->name);
+		dump_stack();
+		return;
+	}
+
+	do
+		current_fiq = current_fiq->next;
+	while (current_fiq->fiq_op(current_fiq->dev_id, 0));
+}
+
+void enable_fiq(int fiq)
+{
+	enable_irq(fiq + FIQ_START);
+}
+
+void disable_fiq(int fiq)
+{
+	disable_irq(fiq + FIQ_START);
+}
+
+EXPORT_SYMBOL(set_fiq_handler);
+EXPORT_SYMBOL(__set_fiq_regs);	/* defined in fiqasm.S */
+EXPORT_SYMBOL(__get_fiq_regs);	/* defined in fiqasm.S */
+EXPORT_SYMBOL(claim_fiq);
+EXPORT_SYMBOL(release_fiq);
+EXPORT_SYMBOL(enable_fiq);
+EXPORT_SYMBOL(disable_fiq);
+
+void __init init_FIQ(void)
+{
+	no_fiq_insn = *(unsigned long *)0xffff001c;
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/fiqasm.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/fiqasm.S
new file mode 100644
index 0000000..207f9d6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/fiqasm.S
@@ -0,0 +1,49 @@
+/*
+ *  linux/arch/arm/kernel/fiqasm.S
+ *
+ *  Derived from code originally in linux/arch/arm/kernel/fiq.c:
+ *
+ *  Copyright (C) 1998 Russell King
+ *  Copyright (C) 1998, 1999 Phil Blundell
+ *  Copyright (C) 2011, Linaro Limited
+ *
+ *  FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
+ *
+ *  FIQ support re-written by Russell King to be more generic
+ *
+ *  v7/Thumb-2 compatibility modifications by Linaro Limited, 2011.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Taking an interrupt in FIQ mode is death, so both these functions
+ * disable irqs for the duration.
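+ *
+ * r8-r12, sp and lr are banked in FIQ mode, so the only way to access
+ * the FIQ copies is from FIQ mode itself.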
+ */
+
+ENTRY(__set_fiq_regs)
+	mov	r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
+	mrs	r1, cpsr
+	msr	cpsr_c, r2	@ select FIQ mode
+	mov	r0, r0		@ avoid hazard prior to ARMv4
+	ldmia	r0!, {r8 - r12}
+	ldr	sp, [r0], #4
+	ldr	lr, [r0]
+	msr	cpsr_c, r1	@ return to SVC mode
+	mov	r0, r0		@ avoid hazard prior to ARMv4
+	mov	pc, lr
+ENDPROC(__set_fiq_regs)
+
+ENTRY(__get_fiq_regs)
+	mov	r2, #PSR_I_BIT | PSR_F_BIT | FIQ_MODE
+	mrs	r1, cpsr
+	msr	cpsr_c, r2	@ select FIQ mode
+	mov	r0, r0		@ avoid hazard prior to ARMv4
+	stmia	r0!, {r8 - r12}
+	str	sp, [r0], #4
+	str	lr, [r0]
+	msr	cpsr_c, r1	@ return to SVC mode
+	mov	r0, r0		@ avoid hazard prior to ARMv4
+	mov	pc, lr
+ENDPROC(__get_fiq_regs)
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/ftrace.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/ftrace.c
new file mode 100644
index 0000000..6a740a9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/ftrace.c
@@ -0,0 +1,260 @@
+/*
+ * Dynamic function tracing support.
+ *
+ * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
+ * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
+ *
+ * For licencing details, see COPYING.
+ *
+ * Defines low-level handling of mcount calls when the kernel
+ * is compiled with the -pg flag. When using dynamic ftrace, the
+ * mcount call-sites get patched with a NOP until they are enabled.
+ * All code mutation routines here are called under stop_machine().
+ */
+
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/opcodes.h>
+#include <asm/ftrace.h>
+
+#include "insn.h"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define	NOP		0xf85deb04	/* pop.w {lr} */
+#else
+#define	NOP		0xe8bd4000	/* pop {lr} */
+#endif
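+
+/*
+ * These "NOPs" are not literal no-ops: the __gnu_mcount_nc call site is
+ * "push {lr}; bl __gnu_mcount_nc", so the patched-in pop of lr undoes
+ * the push and leaves the function as if it had never been traced.
+ */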
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_OLD_MCOUNT
+#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
+#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
+
+#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
+
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+{
+	return rec->arch.old_mcount ? OLD_NOP : NOP;
+}
+
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+{
+	if (!rec->arch.old_mcount)
+		return addr;
+
+	if (addr == MCOUNT_ADDR)
+		addr = OLD_MCOUNT_ADDR;
+	else if (addr == FTRACE_ADDR)
+		addr = OLD_FTRACE_ADDR;
+
+	return addr;
+}
+#else
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+{
+	return NOP;
+}
+
+static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+{
+	return addr;
+}
+#endif
+
+int ftrace_arch_code_modify_prepare(void)
+{
+	set_kernel_text_rw();
+	set_all_modules_text_rw();
+	return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+	set_all_modules_text_ro();
+	set_kernel_text_ro();
+	return 0;
+}
+
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+	return arm_gen_branch_link(pc, addr);
+}
+
+static int ftrace_modify_code(unsigned long pc, unsigned long old,
+			      unsigned long new, bool validate)
+{
+	unsigned long replaced;
+
+	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+		old = __opcode_to_mem_thumb32(old);
+		new = __opcode_to_mem_thumb32(new);
+	} else {
+		old = __opcode_to_mem_arm(old);
+		new = __opcode_to_mem_arm(new);
+	}
+
+	if (validate) {
+		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+			return -EFAULT;
+
+		if (replaced != old)
+			return -EINVAL;
+	}
+
+	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+	return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long pc;
+	unsigned long new;
+	int ret;
+
+	pc = (unsigned long)&ftrace_call;
+	new = ftrace_call_replace(pc, (unsigned long)func);
+
+	ret = ftrace_modify_code(pc, 0, new, false);
+
+#ifdef CONFIG_OLD_MCOUNT
+	if (!ret) {
+		pc = (unsigned long)&ftrace_call_old;
+		new = ftrace_call_replace(pc, (unsigned long)func);
+
+		ret = ftrace_modify_code(pc, 0, new, false);
+	}
+#endif
+
+	return ret;
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long new, old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_nop_replace(rec);
+	new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+	return ftrace_modify_code(rec->ip, old, new, true);
+}
+
+int ftrace_make_nop(struct module *mod,
+		    struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long ip = rec->ip;
+	unsigned long old;
+	unsigned long new;
+	int ret;
+
+	old = ftrace_call_replace(ip, adjust_address(rec, addr));
+	new = ftrace_nop_replace(rec);
+	ret = ftrace_modify_code(ip, old, new, true);
+
+#ifdef CONFIG_OLD_MCOUNT
+	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
+		rec->arch.old_mcount = true;
+
+		old = ftrace_call_replace(ip, adjust_address(rec, addr));
+		new = ftrace_nop_replace(rec);
+		ret = ftrace_modify_code(ip, old, new, true);
+	}
+#endif
+
+	return ret;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+	*(unsigned long *)data = 0;
+
+	return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long) &return_to_handler;
+	struct ftrace_graph_ent trace;
+	unsigned long old;
+	int err;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
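+	/*
+	 * Divert the saved return address: the instrumented function will
+	 * "return" into return_to_handler, which reports the exit and then
+	 * jumps to the real return address.
+	 */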
+	old = *parent;
+	*parent = return_hooker;
+
+	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		*parent = old;
+		return;
+	}
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+				       frame_pointer);
+	if (err == -EBUSY) {
+		*parent = old;
+		return;
+	}
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+extern unsigned long ftrace_graph_call_old;
+extern void ftrace_graph_caller_old(void);
+
+static int __ftrace_modify_caller(unsigned long *callsite,
+				  void (*func) (void), bool enable)
+{
+	unsigned long caller_fn = (unsigned long) func;
+	unsigned long pc = (unsigned long) callsite;
+	unsigned long branch = arm_gen_branch(pc, caller_fn);
+	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
+	unsigned long old = enable ? nop : branch;
+	unsigned long new = enable ? branch : nop;
+
+	return ftrace_modify_code(pc, old, new, true);
+}
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+	int ret;
+
+	ret = __ftrace_modify_caller(&ftrace_graph_call,
+				     ftrace_graph_caller,
+				     enable);
+
+#ifdef CONFIG_OLD_MCOUNT
+	if (!ret)
+		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
+					     ftrace_graph_caller_old,
+					     enable);
+#endif
+
+	return ret;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/head-common.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/head-common.S
new file mode 100644
index 0000000..3c36e04
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/head-common.S
@@ -0,0 +1,205 @@
+/*
+ *  linux/arch/arm/kernel/head-common.S
+ *
+ *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define ATAG_CORE 0x54410001
+#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
+#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define OF_DT_MAGIC 0xd00dfeed
+#else
+#define OF_DT_MAGIC 0xedfe0dd0 /* 0xd00dfeed in big-endian */
+#endif
+
+/*
+ * Exception handling.  Something went wrong and we can't proceed.  We
+ * ought to tell the user, but since we don't have any guarantee that
+ * we're even running on the right architecture, we do virtually nothing.
+ *
+ * If CONFIG_DEBUG_LL is set we try to print out something about the error
+ * and hope for the best (useful if bootloader fails to pass a proper
+ * machine ID for example).
+ */
+	__HEAD
+
+/* Determine validity of the r2 atags pointer.  The heuristic requires
+ * that the pointer be aligned, in the first 16k of physical RAM and
+ * that the ATAG_CORE marker is first and present.  If CONFIG_OF_FLATTREE
+ * is selected, then it will also accept a dtb pointer.  Future revisions
+ * of this function may be more lenient with the physical address and
+ * may also be able to move the ATAGS block if necessary.
+ *
+ * Returns:
+ *  r2 either valid atags pointer, valid dtb pointer, or zero
+ *  r5, r6 corrupted
+ */
+__vet_atags:
+	tst	r2, #0x3			@ aligned?
+	bne	1f
+
+	ldr	r5, [r2, #0]
+#ifdef CONFIG_OF_FLATTREE
+	ldr	r6, =OF_DT_MAGIC		@ is it a DTB?
+	cmp	r5, r6
+	beq	2f
+#endif
+	cmp	r5, #ATAG_CORE_SIZE		@ is first tag ATAG_CORE?
+	cmpne	r5, #ATAG_CORE_SIZE_EMPTY
+	bne	1f
+	ldr	r5, [r2, #4]
+	ldr	r6, =ATAG_CORE
+	cmp	r5, r6
+	bne	1f
+
+2:	mov	pc, lr				@ atag/dtb pointer is ok
+
+1:	mov	r2, #0
+	mov	pc, lr
+ENDPROC(__vet_atags)
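+
+/*
+ * For reference, the words vetted above (a sketch of the generic ATAG
+ * header layout):
+ *
+ *	[r2, #0]  tag size in words - 5 for ATAG_CORE (header plus flags,
+ *	          pagesize, rootdev), or 2 for an empty ATAG_CORE
+ *	[r2, #4]  tag id - ATAG_CORE (0x54410001)
+ */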
+
+/*
+ * The following fragment of code is executed with the MMU on,
+ * and uses absolute addresses; this is not position independent.
+ *
+ *  r0  = cp#15 control register
+ *  r1  = machine ID
+ *  r2  = atags/dtb pointer
+ *  r9  = processor ID
+ */
+	__INIT
+__mmap_switched:
+	adr	r3, __mmap_switched_data
+
+	ldmia	r3!, {r4, r5, r6, r7}
+	cmp	r4, r5				@ Copy data segment if needed
+1:	cmpne	r5, r6
+	ldrne	fp, [r4], #4
+	strne	fp, [r5], #4
+	bne	1b
+
+	mov	fp, #0				@ Clear BSS (and zero fp)
+1:	cmp	r6, r7
+	strcc	fp, [r6],#4
+	bcc	1b
+
+ ARM(	ldmia	r3, {r4, r5, r6, r7, sp})
+ THUMB(	ldmia	r3, {r4, r5, r6, r7}	)
+ THUMB(	ldr	sp, [r3, #16]		)
+	str	r9, [r4]			@ Save processor ID
+	str	r1, [r5]			@ Save machine type
+	str	r2, [r6]			@ Save atags pointer
+	bic	r4, r0, #CR_A			@ Clear 'A' bit
+	stmia	r7, {r0, r4}			@ Save control register values
+	b	start_kernel
+ENDPROC(__mmap_switched)
+
+	.align	2
+	.type	__mmap_switched_data, %object
+__mmap_switched_data:
+	.long	__data_loc			@ r4
+	.long	_sdata				@ r5
+	.long	__bss_start			@ r6
+	.long	_end				@ r7
+	.long	processor_id			@ r4
+	.long	__machine_arch_type		@ r5
+	.long	__atags_pointer			@ r6
+	.long	cr_alignment			@ r7
+	.long	init_thread_union + THREAD_START_SP @ sp
+
+	.size	__mmap_switched_data, . - __mmap_switched_data
+
+/*
+ * This provides a C-API version of __lookup_processor_type
+ */
+ENTRY(lookup_processor_type)
+	stmfd	sp!, {r4 - r6, r9, lr}
+	mov	r9, r0
+	bl	__lookup_processor_type
+	mov	r0, r5
+	ldmfd	sp!, {r4 - r6, r9, pc}
+ENDPROC(lookup_processor_type)
+
+/*
+ * Read processor ID register (CP#15, CR0), and look up in the linker-built
+ * supported processor list.  Note that we can't use the absolute addresses
+ * for the __proc_info lists since we aren't running with the MMU on
+ * (and therefore, we are not in the correct address space).  We have to
+ * calculate the offset.
+ *
+ *	r9 = cpuid
+ * Returns:
+ *	r3, r4, r6 corrupted
+ *	r5 = proc_info pointer in physical address space
+ *	r9 = cpuid (preserved)
+ */
+	__CPUINIT
+__lookup_processor_type:
+	adr	r3, __lookup_processor_type_data
+	ldmia	r3, {r4 - r6}
+	sub	r3, r3, r4			@ get offset between virt&phys
+	add	r5, r5, r3			@ convert virt addresses to
+	add	r6, r6, r3			@ physical address space
+1:	ldmia	r5, {r3, r4}			@ value, mask
+	and	r4, r4, r9			@ mask wanted bits
+	teq	r3, r4
+	beq	2f
+	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
+	cmp	r5, r6
+	blo	1b
+	mov	r5, #0				@ unknown processor
+2:	mov	pc, lr
+ENDPROC(__lookup_processor_type)
+
+/*
+ * Look in <asm/procinfo.h> for information about the __proc_info structure.
+ */
+	.align	2
+	.type	__lookup_processor_type_data, %object
+__lookup_processor_type_data:
+	.long	.
+	.long	__proc_info_begin
+	.long	__proc_info_end
+	.size	__lookup_processor_type_data, . - __lookup_processor_type_data
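+
+/*
+ * The ".long ." entry records the table's link-time (virtual) address;
+ * at run time "adr r3" yields its physical address, so r3 - r4 is the
+ * phys-virt delta applied above to the __proc_info list bounds.
+ */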
+
+__error_p:
+#ifdef CONFIG_DEBUG_LL
+	adr	r0, str_p1
+	bl	printascii
+	mov	r0, r9
+	bl	printhex8
+	adr	r0, str_p2
+	bl	printascii
+	b	__error
+str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant (0x"
+str_p2:	.asciz	").\n"
+	.align
+#endif
+ENDPROC(__error_p)
+
+__error:
+#ifdef CONFIG_ARCH_RPC
+/*
+ * Turn the screen red on an error - RiscPC only.
+ */
+	mov	r0, #0x02000000
+	mov	r3, #0x11
+	orr	r3, r3, r3, lsl #8
+	orr	r3, r3, r3, lsl #16
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+#endif
+1:	mov	r0, r0
+	b	1b
+ENDPROC(__error)
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/head-nommu.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/head-nommu.S
new file mode 100644
index 0000000..278cfc1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/head-nommu.S
@@ -0,0 +1,98 @@
+/*
+ *  linux/arch/arm/kernel/head-nommu.S
+ *
+ *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (C) 2003-2006 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Common kernel startup code (non-paged MM)
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/cp15.h>
+#include <asm/thread_info.h>
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * This is normally called from the decompressor code.  The requirements
+ * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
+ * r1 = machine nr.
+ *
+ * See linux/arch/arm/tools/mach-types for the complete list of machine
+ * numbers for r1.
+ *
+ */
+	.arm
+
+	__HEAD
+ENTRY(stext)
+
+ THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
+ THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
+ THUMB(	.thumb			)	@ switch to Thumb now.
+ THUMB(1:			)
+
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+						@ and irqs disabled
+#ifndef CONFIG_CPU_CP15
+	ldr	r9, =CONFIG_PROCESSOR_ID
+#else
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+#endif
+	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
+	movs	r10, r5				@ invalid processor (r5=0)?
+	beq	__error_p				@ yes, error 'p'
+
+	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
+ENDPROC(stext)
+
+/*
+ * Set the Control Register and read the processor ID.
+ */
+__after_proc_init:
+#ifdef CONFIG_CPU_CP15
+	/*
+	 * CP15 system control register value returned in r0 from
+	 * the CPU init function.
+	 */
+#ifdef CONFIG_ALIGNMENT_TRAP
+	orr	r0, r0, #CR_A
+#else
+	bic	r0, r0, #CR_A
+#endif
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CR_C
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+	bic	r0, r0, #CR_Z
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+	bic	r0, r0, #CR_I
+#endif
+#ifdef CONFIG_CPU_HIGH_VECTOR
+	orr	r0, r0, #CR_V
+#else
+	bic	r0, r0, #CR_V
+#endif
+	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+#endif /* CONFIG_CPU_CP15 */
+
+	b	__mmap_switched			@ clear the BSS and jump
+						@ to start_kernel
+ENDPROC(__after_proc_init)
+	.ltorg
+
+#include "head-common.S"
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/head.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/head.S
new file mode 100644
index 0000000..7170ec8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/head.S
@@ -0,0 +1,632 @@
+/*
+ *  linux/arch/arm/kernel/head.S
+ *
+ *  Copyright (C) 1994-2002 Russell King
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Kernel startup code for all 32-bit CPUs
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/cp15.h>
+#include <asm/domain.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/memory.h>
+#include <asm/thread_info.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_DEBUG_LL
+#include <mach/debug-macro.S>
+#endif
+
+/*
+ * swapper_pg_dir is the virtual address of the initial page table.
+ * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
+ * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
+ * the least significant 16 bits to be 0x8000, but we could probably
+ * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
+ */
+#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
+#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
+#error KERNEL_RAM_VADDR must start at 0xXXXX8000
+#endif
+
+#ifdef CONFIG_ARM_LPAE
+	/* LPAE requires an additional page for the PGD */
+#define PG_DIR_SIZE	0x5000
+#define PMD_ORDER	3
+#else
+#define PG_DIR_SIZE	0x4000
+#define PMD_ORDER	2
+#endif
+
+	.globl	swapper_pg_dir
+	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
+
+	.macro	pgtbl, rd, phys
+	add	\rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
+	.endm
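+
+/*
+ * Worked example (a typical, not mandated, configuration): with
+ * PAGE_OFFSET = 0xc0000000 and TEXT_OFFSET = 0x8000, KERNEL_RAM_VADDR
+ * is 0xc0008000 and (with classic 2-level tables, PG_DIR_SIZE = 0x4000)
+ * swapper_pg_dir sits at virtual 0xc0004000; "pgtbl rd, phys" then
+ * yields the physical copy, e.g. 0x80004000 when RAM starts at
+ * 0x80000000.
+ */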
+
+#ifdef CONFIG_XIP_KERNEL
+#define KERNEL_START	XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
+#define KERNEL_END	_edata_loc
+#else
+#define KERNEL_START	KERNEL_RAM_VADDR
+#define KERNEL_END	_end
+#endif
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * This is normally called from the decompressor code.  The requirements
+ * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
+ * r1 = machine nr, r2 = atags or dtb pointer.
+ *
+ * This code is mostly position independent, so if you link the kernel at
+ * 0xc0008000, you call this at __pa(0xc0008000).
+ *
+ * See linux/arch/arm/tools/mach-types for the complete list of machine
+ * numbers for r1.
+ *
+ * We're trying to keep crap to a minimum; DO NOT add any machine specific
+ * crap here - that's what the boot loader (or in extreme, well justified
+ * circumstances, zImage) is for.
+ */
+	.arm
+
+	__HEAD
+ENTRY(stext)
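+	/*
+	 * When booting without U-Boot there is no loader to honour the
+	 * register contract above, so r0, r1 (machine nr) and r2
+	 * (atags/dtb pointer) are reloaded from a fixed, board-configured
+	 * address block instead.
+	 */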
+#ifdef CONFIG_BOOT_WITHOUT_UBOOT
+	ldr r2, =CONFIG_BOOT_WITHOUT_UBOOT_ADDR
+	ldr r0, [r2]
+	ldr r1, [r2, #4]
+	ldr r2, [r2, #8]
+#endif
+
+ THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
+ THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
+ THUMB(	.thumb			)	@ switch to Thumb now.
+ THUMB(1:			)
+
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+						@ and irqs disabled
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
+	movs	r10, r5				@ invalid processor (r5=0)?
+ THUMB( it	eq )		@ force fixup-able long branch encoding
+	beq	__error_p			@ yes, error 'p'
+
+#ifdef CONFIG_ARM_LPAE
+	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
+	and	r3, r3, #0xf			@ extract VMSA support
+	cmp	r3, #5				@ long-descriptor translation table format?
+ THUMB( it	lo )				@ force fixup-able long branch encoding
+	blo	__error_p			@ only classic page table format
+#endif
+
+#ifndef CONFIG_XIP_KERNEL
+	adr	r3, 2f
+	ldmia	r3, {r4, r8}
+	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
+	add	r8, r8, r4			@ PHYS_OFFSET
+#else
+	ldr	r8, =PHYS_OFFSET		@ always constant in this case
+#endif
+
+	/*
+	 * r1 = machine no, r2 = atags or dtb,
+	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
+	 */
+	bl	__vet_atags
+#ifdef CONFIG_SMP_ON_UP
+	bl	__fixup_smp
+#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+	bl	__fixup_pv_table
+#endif
+	bl	__create_page_tables
+
+	/*
+	 * The following calls CPU specific code in a position independent
+	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
+	 * xxx_proc_info structure selected by __lookup_processor_type
+	 * above.  On return, the CPU will be ready for the MMU to be
+	 * turned on, and r0 will hold the CPU control register value.
+	 */
+	ldr	r13, =__mmap_switched		@ address to jump to after
+						@ mmu has been enabled
+	adr	lr, BSYM(1f)			@ return (PIC) address
+	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
+1:	b	__enable_mmu
+ENDPROC(stext)
+	.ltorg
+#ifndef CONFIG_XIP_KERNEL
+2:	.long	.
+	.long	PAGE_OFFSET
+#endif
+
+/*
+ * Setup the initial page tables.  We only setup the barest
+ * amount which are required to get the kernel running, which
+ * generally means mapping in the kernel code.
+ *
+ * r8 = phys_offset, r9 = cpuid, r10 = procinfo
+ *
+ * Returns:
+ *  r0, r3, r5-r7 corrupted
+ *  r4 = physical page table address
+ */
+__create_page_tables:
+	pgtbl	r4, r8				@ page table address
+
+	/*
+	 * Clear the swapper page table
+	 */
+	mov	r0, r4
+	mov	r3, #0
+	add	r6, r0, #PG_DIR_SIZE
+1:	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	str	r3, [r0], #4
+	teq	r0, r6
+	bne	1b
+
+#ifdef CONFIG_ARM_LPAE
+	/*
+	 * Build the PGD table (first level) to point to the PMD table. A PGD
+	 * entry is 64-bit wide.
+	 */
+	mov	r0, r4
+	add	r3, r4, #0x1000			@ first PMD table address
+	orr	r3, r3, #3			@ PGD block type
+	mov	r6, #4				@ PTRS_PER_PGD
+	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
+1:	str	r3, [r0], #4			@ set bottom PGD entry bits
+	str	r7, [r0], #4			@ set top PGD entry bits
+	add	r3, r3, #0x1000			@ next PMD table
+	subs	r6, r6, #1
+	bne	1b
+
+	add	r4, r4, #0x1000			@ point to the PMD tables
+#endif
+
+	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
+
+	/*
+	 * Create identity mapping to cater for __enable_mmu.
+	 * This identity mapping will be removed by paging_init().
+	 */
+	adr	r0, __turn_mmu_on_loc
+	ldmia	r0, {r3, r5, r6}
+	sub	r0, r0, r3			@ virt->phys offset
+	add	r5, r5, r0			@ phys __turn_mmu_on
+	add	r6, r6, r0			@ phys __turn_mmu_on_end
+	mov	r5, r5, lsr #SECTION_SHIFT
+	mov	r6, r6, lsr #SECTION_SHIFT
+
+1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
+	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
+	cmp	r5, r6
+	addlo	r5, r5, #1			@ next section
+	blo	1b
+
+	/*
+	 * Now setup the pagetables for our kernel direct
+	 * mapped region.
+	 */
+	mov	r3, pc
+	mov	r3, r3, lsr #SECTION_SHIFT
+	orr	r3, r7, r3, lsl #SECTION_SHIFT
+	add	r0, r4,  #(KERNEL_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
+	str	r3, [r0, #((KERNEL_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
+	ldr	r6, =(KERNEL_END - 1)
+	add	r0, r0, #1 << PMD_ORDER
+	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
+1:	cmp	r0, r6
+	add	r3, r3, #1 << SECTION_SHIFT
+	strls	r3, [r0], #1 << PMD_ORDER
+	bls	1b
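+
+	/*
+	 * Illustration (classic tables): SECTION_SHIFT = 20 and
+	 * PMD_ORDER = 2, so the loop above writes one 4-byte section
+	 * entry at r4 + ((vaddr >> 20) << 2) per 1MiB of kernel image,
+	 * from KERNEL_START up to KERNEL_END.
+	 */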
+
+#ifdef CONFIG_XIP_KERNEL
+	/*
+	 * Map some ram to cover our .data and .bss areas.
+	 */
+	add	r3, r8, #TEXT_OFFSET
+	orr	r3, r3, r7
+	add	r0, r4,  #(KERNEL_RAM_VADDR & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
+	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> (SECTION_SHIFT - PMD_ORDER)]!
+	ldr	r6, =(_end - 1)
+	add	r0, r0, #4
+	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
+1:	cmp	r0, r6
+	add	r3, r3, #1 << 20
+	strls	r3, [r0], #4
+	bls	1b
+#endif
+
+	/*
+	 * Then map boot params address in r2 or the first 1MB (2MB with LPAE)
+	 * of ram if boot params address is not specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
+	 */
+	mov	r0, r2, lsr #SECTION_SHIFT
+	movs	r0, r0, lsl #SECTION_SHIFT
+	moveq	r0, r8
+	sub	r3, r0, r8
+	ldr r6, =PAGE_OFFSET
+	add	r3, r3, r6
+	add	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
+	orr	r6, r7, r0
+	str	r6, [r3], #1 << PMD_ORDER
+	add	r6, r6, #1 << SECTION_SHIFT
+	str	r6, [r3]
+
+#ifdef CONFIG_DEBUG_LL
+#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
+	/*
+	 * Map in IO space for serial debugging.
+	 * This allows debug messages to be output
+	 * via a serial console before paging_init.
+	 */
+	addruart r7, r3, r0
+
+	mov	r3, r3, lsr #SECTION_SHIFT
+	mov	r3, r3, lsl #PMD_ORDER
+
+	add	r0, r4, r3
+	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
+	cmp	r3, #0x0800			@ limit to 512MB
+	movhi	r3, #0x0800
+	add	r6, r0, r3
+	mov	r3, r7, lsr #SECTION_SHIFT
+	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
+	orr	r3, r7, r3, lsl #SECTION_SHIFT
+#ifdef CONFIG_ARM_LPAE
+	mov	r7, #1 << (54 - 32)		@ XN
+#else
+	orr	r3, r3, #PMD_SECT_XN
+#endif
+1:	str	r3, [r0], #4
+#ifdef CONFIG_ARM_LPAE
+	str	r7, [r0], #4
+#endif
+	add	r3, r3, #1 << SECTION_SHIFT
+	cmp	r0, r6
+	blo	1b
+
+#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
+	/* we don't need any serial debugging mappings */
+	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
+#endif
+
+#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
+	/*
+	 * If we're using the NetWinder or CATS, we also need to map
+	 * in the 16550-type serial port for the debug messages
+	 */
+	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
+	orr	r3, r7, #0x7c000000
+	str	r3, [r0]
+#endif
+#ifdef CONFIG_ARCH_RPC
+	/*
+	 * Map in screen at 0x02000000 & SCREEN2_BASE
+	 * Similar reasons here - for debug.  This is
+	 * only for Acorn RiscPC architectures.
+	 */
+	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
+	orr	r3, r7, #0x02000000
+	str	r3, [r0]
+	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
+	str	r3, [r0]
+#endif
+#endif
+#ifdef CONFIG_ARM_LPAE
+	sub	r4, r4, #0x1000		@ point to the PGD table
+#endif
+	mov	pc, lr
+ENDPROC(__create_page_tables)
+	.ltorg
+	.align
+__turn_mmu_on_loc:
+	.long	.
+	.long	__turn_mmu_on
+	.long	__turn_mmu_on_end
+
+#if defined(CONFIG_SMP)
+	__CPUINIT
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 *
+	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+	 * the processor type - there is no need to check the machine type
+	 * as it has already been validated by the primary processor.
+	 */
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+	bl	__lookup_processor_type
+	movs	r10, r5				@ invalid processor?
+	moveq	r0, #'p'			@ yes, error 'p'
+ THUMB( it	eq )		@ force fixup-able long branch encoding
+	beq	__error_p
+
+	/*
+	 * Use the page tables supplied from __cpu_up.
+	 */
+	adr	r4, __secondary_data
+	ldmia	r4, {r5, r7, r12}		@ address to jump to after
+	sub	lr, r4, r5			@ mmu has been enabled
+	ldr	r4, [r7, lr]			@ get secondary_data.pgdir
+	add	r7, r7, #4
+	ldr	r8, [r7, lr]			@ get secondary_data.swapper_pg_dir
+	adr	lr, BSYM(__enable_mmu)		@ return address
+	mov	r13, r12			@ __secondary_switched address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
+						  @ (return control reg)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
+ENDPROC(secondary_startup)
+
+	/*
+	 * r7 = &secondary_data + 4 (set up by secondary_startup above)
+	 */
+ENTRY(__secondary_switched)
+	ldr	sp, [r7, #4]			@ get secondary_data.stack
+	mov	fp, #0
+	b	secondary_start_kernel
+ENDPROC(__secondary_switched)
+
+	.align
+
+	.type	__secondary_data, %object
+__secondary_data:
+	.long	.
+	.long	secondary_data
+	.long	__secondary_switched
+#endif /* defined(CONFIG_SMP) */
+
+
+
+/*
+ * Setup common bits before finally enabling the MMU.  Essentially
+ * this is just loading the page table pointer and domain access
+ * registers.
+ *
+ *  r0  = cp#15 control register
+ *  r1  = machine ID
+ *  r2  = atags or dtb pointer
+ *  r4  = page table pointer
+ *  r9  = processor ID
+ *  r13 = *virtual* address to jump to upon completion
+ */
+__enable_mmu:
+#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
+	orr	r0, r0, #CR_A
+#else
+	bic	r0, r0, #CR_A
+#endif
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+	bic	r0, r0, #CR_C
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+	bic	r0, r0, #CR_Z
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+	bic	r0, r0, #CR_I
+#endif
+#ifdef CONFIG_ARM_LPAE
+	mov	r5, #0
+	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
+#else
+	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
+	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
+#endif
+	b	__turn_mmu_on
+ENDPROC(__enable_mmu)
+
+/*
+ * Enable the MMU.  This completely changes the structure of the visible
+ * memory space.  You will not be able to trace execution through this.
+ * If you have an enquiry about this, *please* check the linux-arm-kernel
+ * mailing list archives BEFORE sending another post to the list.
+ *
+ *  r0  = cp#15 control register
+ *  r1  = machine ID
+ *  r2  = atags or dtb pointer
+ *  r9  = processor ID
+ *  r13 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
+ */
+	.align	5
+	.pushsection	.idmap.text, "ax"
+ENTRY(__turn_mmu_on)
+	mov	r0, r0
+	instr_sync
+	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
+	instr_sync
+	mov	r3, r3
+	mov	r3, r13
+	mov	pc, r3
+__turn_mmu_on_end:
+ENDPROC(__turn_mmu_on)
+	.popsection
+
+
+#ifdef CONFIG_SMP_ON_UP
+	__INIT
+__fixup_smp:
+	and	r3, r9, #0x000f0000	@ architecture version
+	teq	r3, #0x000f0000		@ CPU ID supported?
+	bne	__fixup_smp_on_up	@ no, assume UP
+
+	bic	r3, r9, #0x00ff0000
+	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
+	mov	r4, #0x41000000
+	orr	r4, r4, #0x0000b000
+	orr	r4, r4, #0x00000020	@ val 0x4100b020
+	teq	r3, r4			@ ARM 11MPCore?
+	moveq	pc, lr			@ yes, assume SMP
+
+	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
+	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
+	teq	r0, #0x80000000		@ not part of a uniprocessor system?
+	moveq	pc, lr			@ yes, assume SMP
+
+__fixup_smp_on_up:
+	adr	r0, 1f
+	ldmia	r0, {r3 - r5}
+	sub	r3, r0, r3
+	add	r4, r4, r3
+	add	r5, r5, r3
+	b	__do_fixup_smp_on_up
+ENDPROC(__fixup_smp)
+
+	.align
+1:	.word	.
+	.word	__smpalt_begin
+	.word	__smpalt_end
+
+	.pushsection .data
+	.globl	smp_on_up
+smp_on_up:
+	ALT_SMP(.long	1)
+	ALT_UP(.long	0)
+	.popsection
+#endif
+
+	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
+#endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
+
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+/* __fixup_pv_table - patch the stub instructions with the delta between
+ * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
+ * can be expressed by an immediate shifter operand. The stub instruction
+ * has a form of '(add|sub) rd, rn, #imm'.
+ */
+	__HEAD
+__fixup_pv_table:
+	adr	r0, 1f
+	ldmia	r0, {r3-r5, r7}
+	sub	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
+	add	r4, r4, r3	@ adjust table start address
+	add	r5, r5, r3	@ adjust table end address
+	add	r7, r7, r3	@ adjust __pv_phys_offset address
+	str	r8, [r7]	@ save computed PHYS_OFFSET to __pv_phys_offset
+	mov	r6, r3, lsr #24	@ constant for add/sub instructions
+	teq	r3, r6, lsl #24 @ must be 16MiB aligned
+THUMB(	it	ne		@ cross section branch )
+	bne	__error
+	str	r6, [r7, #4]	@ save to __pv_offset
+	b	__fixup_a_pv_table
+ENDPROC(__fixup_pv_table)
+
+	.align
+1:	.long	.
+	.long	__pv_table_begin
+	.long	__pv_table_end
+2:	.long	__pv_phys_offset
+
+	.text
+__fixup_a_pv_table:
+#ifdef CONFIG_THUMB2_KERNEL
+	lsls	r6, #24
+	beq	2f
+	clz	r7, r6
+	lsr	r6, #24
+	lsl	r6, r7
+	bic	r6, #0x0080
+	lsrs	r7, #1
+	orrcs	r6, #0x0080
+	orr	r6, r6, r7, lsl #12
+	orr	r6, #0x4000
+	b	2f
+1:	add     r7, r3
+	ldrh	ip, [r7, #2]
+	and	ip, 0x8f00
+	orr	ip, r6	@ mask in offset bits 31-24
+	strh	ip, [r7, #2]
+2:	cmp	r4, r5
+	ldrcc	r7, [r4], #4	@ use branch for delay slot
+	bcc	1b
+	bx	lr
+#else
+	b	2f
+1:	ldr	ip, [r7, r3]
+	bic	ip, ip, #0x000000ff
+	orr	ip, ip, r6	@ mask in offset bits 31-24
+	str	ip, [r7, r3]
+2:	cmp	r4, r5
+	ldrcc	r7, [r4], #4	@ use branch for delay slot
+	bcc	1b
+	mov	pc, lr
+#endif
+ENDPROC(__fixup_a_pv_table)
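+
+/*
+ * Worked example (ARM encoding, illustrative): with PHYS_OFFSET
+ * 0x30000000 and PAGE_OFFSET 0xc0000000 the delta is 0x70000000, so
+ * r6 = 0x70 and each stub "(add|sub) rd, rn, #imm" gets its low byte
+ * (the rotated 8-bit immediate, positioned in bits 31-24) rewritten to
+ * 0x70, turning the immediate into the full 16MiB-aligned delta.
+ */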
+
+ENTRY(fixup_pv_table)
+	stmfd	sp!, {r4 - r7, lr}
+	ldr	r2, 2f			@ get address of __pv_phys_offset
+	mov	r3, #0			@ no offset
+	mov	r4, r0			@ r0 = table start
+	add	r5, r0, r1		@ r1 = table size
+	ldr	r6, [r2, #4]		@ get __pv_offset
+	bl	__fixup_a_pv_table
+	ldmfd	sp!, {r4 - r7, pc}
+ENDPROC(fixup_pv_table)
+
+	.align
+2:	.long	__pv_phys_offset
+
+	.data
+	.globl	__pv_phys_offset
+	.type	__pv_phys_offset, %object
+__pv_phys_offset:
+	.long	0
+	.size	__pv_phys_offset, . - __pv_phys_offset
+__pv_offset:
+	.long	0
+#endif
+
+#include "head-common.S"
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/hw_breakpoint.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/hw_breakpoint.c
new file mode 100644
index 0000000..1a0eb32
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/hw_breakpoint.c
@@ -0,0 +1,1303 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2009, 2010 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+/*
+ * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
+ * using the CPU's debug registers.
+ */
+#define pr_fmt(fmt) "hw-breakpoint: " fmt
+
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+#include <linux/hw_breakpoint_manage.h>
+#endif
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/current.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/kdebug.h>
+#include <asm/traps.h>
+
+/* Breakpoint currently in use for each BRP. */
+static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
+
+/* Watchpoint currently in use for each WRP. */
+static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
+
+/* Number of BRP/WRP registers on this CPU. */
+static int core_num_brps;
+static int core_num_wrps;
+
+/* Debug architecture version. */
+static u8 debug_arch;
+
+/* Maximum supported watchpoint length. */
+static u8 max_watchpoint_len;
+
+#define READ_WB_REG_CASE(OP2, M, VAL)		\
+	case ((OP2 << 4) + M):			\
+		ARM_DBG_READ(c ## M, OP2, VAL); \
+		break
+
+#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
+	case ((OP2 << 4) + M):			\
+		ARM_DBG_WRITE(c ## M, OP2, VAL);\
+		break
+
+#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
+	READ_WB_REG_CASE(OP2, 0, VAL);		\
+	READ_WB_REG_CASE(OP2, 1, VAL);		\
+	READ_WB_REG_CASE(OP2, 2, VAL);		\
+	READ_WB_REG_CASE(OP2, 3, VAL);		\
+	READ_WB_REG_CASE(OP2, 4, VAL);		\
+	READ_WB_REG_CASE(OP2, 5, VAL);		\
+	READ_WB_REG_CASE(OP2, 6, VAL);		\
+	READ_WB_REG_CASE(OP2, 7, VAL);		\
+	READ_WB_REG_CASE(OP2, 8, VAL);		\
+	READ_WB_REG_CASE(OP2, 9, VAL);		\
+	READ_WB_REG_CASE(OP2, 10, VAL);		\
+	READ_WB_REG_CASE(OP2, 11, VAL);		\
+	READ_WB_REG_CASE(OP2, 12, VAL);		\
+	READ_WB_REG_CASE(OP2, 13, VAL);		\
+	READ_WB_REG_CASE(OP2, 14, VAL);		\
+	READ_WB_REG_CASE(OP2, 15, VAL)
+
+#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
+	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
+	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
+	WRITE_WB_REG_CASE(OP2, 15, VAL)
+
+static u32 read_wb_reg(int n)
+{
+	u32 val = 0;
+
+	switch (n) {
+	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
+	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
+	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
+	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
+	default:
+		pr_warning("attempt to read from unknown breakpoint "
+				"register %d\n", n);
+	}
+
+	return val;
+}
+
+static void write_wb_reg(int n, u32 val)
+{
+	switch (n) {
+	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
+	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
+	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
+	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
+	default:
+		pr_warning("attempt to write to unknown breakpoint "
+				"register %d\n", n);
+	}
+	isb();
+}
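+
+/*
+ * The register index used above encodes a cp14 debug register as
+ * (op2 << 4) | CRm, so e.g. ARM_BASE_BVR + n selects breakpoint value
+ * register n through the matching MRC/MCR case.
+ */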
+
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+u32 arm_bpwp_regs_read(unsigned int n)
+{
+	return read_wb_reg(n);
+}
+
+void arm_bpwp_regs_write(unsigned int n, u32 value)
+{
+	write_wb_reg(n, value);
+}
+
+struct perf_event **get_wrps_regs(void)
+{
+	return (struct perf_event **)__get_cpu_var(wp_on_reg);
+}
+
+struct perf_event **get_brps_regs(void)
+{
+	return (struct perf_event **)__get_cpu_var(bp_on_reg);
+}
+
+void get_wrps_brps_nums(int *wrps_num, int *brps_num)
+{
+	*wrps_num = core_num_wrps;
+	*brps_num = core_num_brps;
+}
+
+static u32 hw_breakpoint_get_bytemap(u32 address, u32 size)
+{
+	u32 start = address & 0x3;
+	u32 end;
+	u32 bytemap = 0;
+	u32 i;
+
+	end = start + size;
+	end = end > 4 ? 4 : end;
+
+	for (i = start; i < end; i++)
+		bytemap |= (1 << i);
+
+	return bytemap;
+}
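+
+/*
+ * Example (illustrative): a 2-byte access whose address ends in ...2
+ * gives start = 2, end = 4 and bytemap = 0b1100, i.e. byte-address-
+ * select bits for bytes 2 and 3 of the aligned word.
+ */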
+
+/*
+ * Install a perf counter breakpoint, honouring any single-step
+ * override (CONFIG_HW_BREAKPOINT_MANAGE variant).
+ */
+int sarch_install_step_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct perf_event **slot, **slots;
+	int max_slots, ctrl_base, val_base, ret = 0;
+	u32 addr, ctrl;
+	u32 bytemap = 0;
+	u32 bcr = 0;
+	u32 index_hw = 0;
+
+	hw_breakpoint_enable();
+
+	addr = info->address;
+	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;
+
+	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+		/* Breakpoint */
+		ctrl_base = ARM_BASE_BCR;
+		val_base = ARM_BASE_BVR;
+		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		max_slots = core_num_brps;
+	} else {
+		/* Watchpoint */
+		ctrl_base = ARM_BASE_WCR;
+		val_base = ARM_BASE_WVR;
+		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		max_slots = core_num_wrps;
+	}
+
+	index_hw = bp->attr.bp_index;
+	slot = &slots[index_hw];
+	*slot = bp;
+
+	/* Override the breakpoint data with the step data. */
+	if (info->step_ctrl.enabled) {
+		u32 bp_index = HW_BREAKPOINT_STEP_REGS_INDEX;
+
+		addr = info->trigger & ~0x3;
+		ctrl_base = ARM_BASE_BCR;
+		val_base = ARM_BASE_BVR;
+		max_slots = core_num_brps;
+		bytemap = hw_breakpoint_get_bytemap(addr, 4);
+		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+
+		slot = &slots[bp_index];
+		*slot = bp;
+
+		/* Disable the slot before rewriting its value register. */
+		bcr = read_wb_reg(ctrl_base + bp_index);
+		if (bcr & 0x1)
+			write_wb_reg(ctrl_base + bp_index, bcr & ~0x1);
+
+		write_wb_reg(val_base + bp_index, addr);
+
+		/* Mismatch breakpoint: BAS = bytemap, user + privileged, enabled. */
+		bcr = (0x2 << 20) | (1 << 13) | (bytemap << 5) | (0x3 << 1) | 0x1;
+		write_wb_reg(ctrl_base + bp_index, bcr);
+		return ret;
+	}
+
+	/* Setup the address register. */
+	write_wb_reg(val_base + index_hw, addr);
+
+	/* Setup the control register. */
+	write_wb_reg(ctrl_base + index_hw, ctrl);
+	return ret;
+}
+
+#endif /* CONFIG_HW_BREAKPOINT_MANAGE */
+
+/* Determine debug architecture. */
+static u8 get_debug_arch(void)
+{
+	u32 didr;
+
+	/* Do we implement the extended CPUID interface? */
+	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+		pr_warning("CPUID feature registers not supported. "
+			   "Assuming v6 debug is present.\n");
+		return ARM_DEBUG_ARCH_V6;
+	}
+
+	ARM_DBG_READ(c0, 0, didr);
+	return (didr >> 16) & 0xf;
+}
+
+u8 arch_get_debug_arch(void)
+{
+	return debug_arch;
+}
+
+static int debug_arch_supported(void)
+{
+	u8 arch = get_debug_arch();
+
+	/* We don't support the memory-mapped interface. */
+	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
+		arch >= ARM_DEBUG_ARCH_V7_1;
+}
+
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+	return 0;
+}
+
+/* Determine number of WRP registers available. */
+static int get_num_wrp_resources(void)
+{
+	u32 didr;
+	ARM_DBG_READ(c0, 0, didr);
+	return ((didr >> 28) & 0xf) + 1;
+}
+
+/* Determine number of BRP registers available. */
+static int get_num_brp_resources(void)
+{
+	u32 didr;
+	ARM_DBG_READ(c0, 0, didr);
+	return ((didr >> 24) & 0xf) + 1;
+}
+
+/* Does this core support mismatch breakpoints? */
+static int core_has_mismatch_brps(void)
+{
+	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
+		get_num_brp_resources() > 1);
+}
+
+/* Determine number of usable WRPs available. */
+static int get_num_wrps(void)
+{
+	/*
+	 * On debug architectures prior to 7.1, when a watchpoint fires, the
+	 * only way to work out which watchpoint it was is by disassembling
+	 * the faulting instruction and working out the address of the memory
+	 * access.
+	 *
+	 * Furthermore, we can only do this if the watchpoint was precise
+	 * since imprecise watchpoints prevent us from calculating register
+	 * based addresses.
+	 *
+	 * Providing we have more than 1 breakpoint register, we only report
+	 * a single watchpoint register for the time being. This way, we always
+	 * know which watchpoint fired. In the future we can either add a
+	 * disassembler and address generation emulator, or we can insert a
+	 * check to see if the DFAR is set on watchpoint exception entry
+	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
+	 * that it is set on some implementations].
+	 */
+	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
+		return 1;
+
+	return get_num_wrp_resources();
+}
+
+/* Determine number of usable BRPs available. */
+static int get_num_brps(void)
+{
+	int brps = get_num_brp_resources();
+	return core_has_mismatch_brps() ? brps - 1 : brps;
+}
+
+/*
+ * In order to access the breakpoint/watchpoint control registers,
+ * we must be running in debug monitor mode. Unfortunately, we can
+ * be put into halting debug mode at any time by an external debugger
+ * but there is nothing we can do to prevent that.
+ */
+static int enable_monitor_mode(void)
+{
+	u32 dscr;
+	int ret = 0;
+
+	ARM_DBG_READ(c1, 0, dscr);
+
+	/* Ensure that halting mode is disabled. */
+	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
+		"halting debug mode enabled. Unable to access hardware resources.\n")) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	/* If monitor mode is already enabled, just return. */
+	if (dscr & ARM_DSCR_MDBGEN)
+		goto out;
+
+	/* Write to the corresponding DSCR. */
+	switch (get_debug_arch()) {
+	case ARM_DEBUG_ARCH_V6:
+	case ARM_DEBUG_ARCH_V6_1:
+		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
+		break;
+	case ARM_DEBUG_ARCH_V7_ECP14:
+	case ARM_DEBUG_ARCH_V7_1:
+	case ARM_DEBUG_ARCH_V8:
+		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
+		break;
+	default:
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/* Check that the write made it through. */
+	ARM_DBG_READ(c1, 0, dscr);
+	if (!(dscr & ARM_DSCR_MDBGEN))
+		ret = -EPERM;
+
+out:
+	return ret;
+}
+
+int hw_breakpoint_slots(int type)
+{
+	if (!debug_arch_supported())
+		return 0;
+
+	/*
+	 * We can be called early, so don't rely on
+	 * our static variables being initialised.
+	 */
+	switch (type) {
+	case TYPE_INST:
+		return get_num_brps();
+	case TYPE_DATA:
+		return get_num_wrps();
+	default:
+		pr_warning("unknown slot type: %d\n", type);
+		return 0;
+	}
+}
+
+/*
+ * Check if 8-bit byte-address select is available.
+ * This clobbers WRP 0.
+ */
+static u8 get_max_wp_len(void)
+{
+	u32 ctrl_reg;
+	struct arch_hw_breakpoint_ctrl ctrl;
+	u8 size = 4;
+
+	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
+		goto out;
+
+	memset(&ctrl, 0, sizeof(ctrl));
+	ctrl.len = ARM_BREAKPOINT_LEN_8;
+	ctrl_reg = encode_ctrl_reg(ctrl);
+
+	write_wb_reg(ARM_BASE_WVR, 0);
+	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
+	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
+		size = 8;
+
+out:
+	return size;
+}
+
+u8 arch_get_max_wp_len(void)
+{
+	return max_watchpoint_len;
+}
+
+/*
+ * Install a perf counter breakpoint.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct perf_event **slot, **slots;
+	int i, max_slots, ctrl_base, val_base, ret = 0;
+	u32 addr, ctrl;
+
+	/* Ensure that we are in monitor mode and halting mode is disabled. */
+	ret = enable_monitor_mode();
+	if (ret)
+		goto out;
+
+	addr = info->address;
+	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;
+
+	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+		/* Breakpoint */
+		ctrl_base = ARM_BASE_BCR;
+		val_base = ARM_BASE_BVR;
+		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		max_slots = core_num_brps;
+	} else {
+		/* Watchpoint */
+		ctrl_base = ARM_BASE_WCR;
+		val_base = ARM_BASE_WVR;
+		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		max_slots = core_num_wrps;
+	}
+
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+	i = bp->attr.bp_index;
+	slot = &slots[i];
+	*slot = bp;
+#else
+	for (i = 0; i < max_slots; ++i) {
+		slot = &slots[i];
+
+		if (!*slot) {
+			*slot = bp;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
+		ret = -EBUSY;
+		goto out;
+	}
+#endif
+
+	/* Override the breakpoint data with the step data. */
+	if (info->step_ctrl.enabled) {
+		addr = info->trigger & ~0x3;
+		ctrl = encode_ctrl_reg(info->step_ctrl);
+		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
+			i = 0;
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+			ctrl_base = ARM_BASE_BCR;
+			val_base = ARM_BASE_BVR;
+#else	
+			ctrl_base = ARM_BASE_BCR + core_num_brps;
+			val_base = ARM_BASE_BVR + core_num_brps;
+#endif
+		}
+	}
+
+	/* Setup the address register. */
+	write_wb_reg(val_base + i, addr);
+
+	/* Setup the control register. */
+	write_wb_reg(ctrl_base + i, ctrl);
+
+out:
+	return ret;
+}
+
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+int arch_uninstall_hw_breakpoint(struct perf_event *bp)
+#else
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+#endif
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	struct perf_event **slot, **slots;
+	int i, max_slots, base;
+
+	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
+		/* Breakpoint */
+		base = ARM_BASE_BCR;
+		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		max_slots = core_num_brps;
+	} else {
+		/* Watchpoint */
+		base = ARM_BASE_WCR;
+		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		max_slots = core_num_wrps;
+	}
+
+	/* Remove the breakpoint. */
+	for (i = 0; i < max_slots; ++i) {
+		slot = &slots[i];
+
+		if (*slot == bp) {
+			*slot = NULL;
+			break;
+		}
+	}
+
+	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+		return HW_BREAKPOINT_FAIL;
+#else
+		return;
+#endif
+	}
+
+	/* Ensure that we disable the mismatch breakpoint. */
+	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
+	    info->step_ctrl.enabled) {
+		i = 0;
+	#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+		base = ARM_BASE_BCR;
+	#else
+		base = ARM_BASE_BCR + core_num_brps;
+	#endif
+	}
+
+	/* Reset the control register. */
+	write_wb_reg(base + i, 0);
+
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+	return HW_BREAKPOINT_SUCCESS;
+#endif
+}
+
+static int get_hbp_len(u8 hbp_len)
+{
+	unsigned int len_in_bytes = 0;
+
+	switch (hbp_len) {
+	case ARM_BREAKPOINT_LEN_1:
+		len_in_bytes = 1;
+		break;
+	case ARM_BREAKPOINT_LEN_2:
+		len_in_bytes = 2;
+		break;
+	case ARM_BREAKPOINT_LEN_4:
+		len_in_bytes = 4;
+		break;
+	case ARM_BREAKPOINT_LEN_8:
+		len_in_bytes = 8;
+		break;
+	}
+
+	return len_in_bytes;
+}
+
+/*
+ * Check whether bp virtual address is in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
+{
+	unsigned int len;
+	unsigned long va;
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	va = info->address;
+	len = get_hbp_len(info->ctrl.len);
+
+	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
+ * Hopefully this will disappear when ptrace can bypass the conversion
+ * to generic breakpoint descriptions.
+ */
+int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+			   int *gen_len, int *gen_type)
+{
+	/* Type */
+	switch (ctrl.type) {
+	case ARM_BREAKPOINT_EXECUTE:
+		*gen_type = HW_BREAKPOINT_X;
+		break;
+	case ARM_BREAKPOINT_LOAD:
+		*gen_type = HW_BREAKPOINT_R;
+		break;
+	case ARM_BREAKPOINT_STORE:
+		*gen_type = HW_BREAKPOINT_W;
+		break;
+	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
+		*gen_type = HW_BREAKPOINT_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Len */
+	switch (ctrl.len) {
+	case ARM_BREAKPOINT_LEN_1:
+		*gen_len = HW_BREAKPOINT_LEN_1;
+		break;
+	case ARM_BREAKPOINT_LEN_2:
+		*gen_len = HW_BREAKPOINT_LEN_2;
+		break;
+	case ARM_BREAKPOINT_LEN_4:
+		*gen_len = HW_BREAKPOINT_LEN_4;
+		break;
+	case ARM_BREAKPOINT_LEN_8:
+		*gen_len = HW_BREAKPOINT_LEN_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Construct an arch_hw_breakpoint from a perf_event.
+ */
+static int arch_build_bp_info(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	/* Type */
+	switch (bp->attr.bp_type) {
+	case HW_BREAKPOINT_X:
+		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+		break;
+	case HW_BREAKPOINT_R:
+		info->ctrl.type = ARM_BREAKPOINT_LOAD;
+		break;
+	case HW_BREAKPOINT_W:
+		info->ctrl.type = ARM_BREAKPOINT_STORE;
+		break;
+	case HW_BREAKPOINT_RW:
+		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Len */
+	switch (bp->attr.bp_len) {
+	case HW_BREAKPOINT_LEN_1:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+		break;
+	case HW_BREAKPOINT_LEN_2:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+		break;
+	case HW_BREAKPOINT_LEN_4:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+		break;
+	case HW_BREAKPOINT_LEN_8:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
+		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
+			&& max_watchpoint_len >= 8)
+			break;
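+		/* Else fall through to -EINVAL. */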
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
+	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
+	 * by the hardware and must be aligned to the appropriate number of
+	 * bytes.
+	 */
+	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+		return -EINVAL;
+
+	/* Address */
+	info->address = bp->attr.bp_addr;
+
+	/* Privilege */
+	info->ctrl.privilege = ARM_BREAKPOINT_USER;
+	if (arch_check_bp_in_kernelspace(bp))
+		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
+
+	/* Enabled? */
+	info->ctrl.enabled = !bp->attr.disabled;
+
+	/* Mismatch */
+	info->ctrl.mismatch = 0;
+
+	return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings.
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+	int ret = 0;
+	u32 offset, alignment_mask = 0x3;
+
+	/* Build the arch_hw_breakpoint. */
+	ret = arch_build_bp_info(bp);
+	if (ret)
+		goto out;
+
+	/* Check address alignment. */
+	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+		alignment_mask = 0x7;
+	offset = info->address & alignment_mask;
+	switch (offset) {
+	case 0:
+		/* Aligned */
+		break;
+	case 1:
+		/* Allow single byte watchpoint. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+			break;
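+		/* Fall through */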
+	case 2:
+		/* Allow halfword watchpoints and breakpoints. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+			break;
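+		/* Fall through */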
+	default:
+		ret = -EINVAL;
+		goto out;
+	}
+
+	info->address &= ~alignment_mask;
+	info->ctrl.len <<= offset;
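+
+	/*
+	 * Worked example (illustrative): a 2-byte watchpoint at 0x1002
+	 * becomes address 0x1000 with ctrl.len = ARM_BREAKPOINT_LEN_2 << 2
+	 * = 0b1100, i.e. the byte-address-select covers bytes 2-3 of the
+	 * word.
+	 */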
+
+	if (!bp->overflow_handler) {
+		/*
+		 * Mismatch breakpoints are required for single-stepping
+		 * breakpoints.
+		 */
+		if (!core_has_mismatch_brps())
+			return -EINVAL;
+
+		/* We don't allow mismatch breakpoints in kernel space. */
+		if (arch_check_bp_in_kernelspace(bp))
+			return -EPERM;
+
+		/*
+		 * Per-cpu breakpoints are not supported by our stepping
+		 * mechanism.
+		 */
+		if (!bp->hw.bp_target)
+			return -EINVAL;
+
+		/*
+		 * We only support specific access types if the fsr
+		 * reports them.
+		 */
+		if (!debug_exception_updates_fsr() &&
+		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+		     info->ctrl.type == ARM_BREAKPOINT_STORE))
+			return -EINVAL;
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * Enable/disable single-stepping over the breakpoint bp at address addr.
+ */
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+static int enable_single_step(struct perf_event *bp, u32 addr)
+{
+	int ret = 0;
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	ret = arch_uninstall_hw_breakpoint(bp);
+	if (ret != HW_BREAKPOINT_SUCCESS)
+		return HW_BREAKPOINT_FAIL;
+#else
+static void enable_single_step(struct perf_event *bp, u32 addr)
+{
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+	arch_uninstall_hw_breakpoint(bp);
+#endif
+	info->step_ctrl.mismatch  = 1;
+	info->step_ctrl.len	  = ARM_BREAKPOINT_LEN_4;
+	info->step_ctrl.type	  = ARM_BREAKPOINT_EXECUTE;
+	info->step_ctrl.privilege = info->ctrl.privilege;
+	info->step_ctrl.enabled	  = 1;
+	info->trigger		  = addr;
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+	return sarch_install_step_breakpoint(bp);
+#else
+	arch_install_hw_breakpoint(bp);
+#endif
+}
+
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+static int disable_single_step(struct perf_event *bp)
+{
+	int ret = 0;
+
+	ret = arch_uninstall_hw_breakpoint(bp);
+	if (ret != HW_BREAKPOINT_SUCCESS)
+		return HW_BREAKPOINT_FAIL;
+
+	counter_arch_bp(bp)->step_ctrl.enabled = 0;
+	return arch_install_hw_breakpoint(bp);
+}
+#else
+static void disable_single_step(struct perf_event *bp)
+{
+	arch_uninstall_hw_breakpoint(bp);
+	counter_arch_bp(bp)->step_ctrl.enabled = 0;
+	arch_install_hw_breakpoint(bp);
+}
+#endif
+
+static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+			       struct pt_regs *regs)
+{
+	int i, access;
+	u32 val, ctrl_reg, alignment_mask;
+	struct perf_event *wp, **slots;
+	struct arch_hw_breakpoint *info;
+	struct arch_hw_breakpoint_ctrl ctrl;
+
+	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+
+	for (i = 0; i < core_num_wrps; ++i) {
+		rcu_read_lock();
+
+		wp = slots[i];
+
+		if (wp == NULL)
+			goto unlock;
+
+		info = counter_arch_bp(wp);
+		/*
+		 * The DFAR is an unknown value on debug architectures prior
+		 * to 7.1. Since we only allow a single watchpoint on these
+		 * older CPUs, we can set the trigger to the lowest possible
+		 * faulting address.
+		 */
+		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
+			BUG_ON(i > 0);
+			info->trigger = wp->attr.bp_addr;
+		} else {
+			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+				alignment_mask = 0x7;
+			else
+				alignment_mask = 0x3;
+
+			/* Check if the watchpoint value matches. */
+			val = read_wb_reg(ARM_BASE_WVR + i);
+			if (val != (addr & ~alignment_mask))
+				goto unlock;
+
+			/* Possible match, check the byte address select. */
+			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+			decode_ctrl_reg(ctrl_reg, &ctrl);
+			if (!((1 << (addr & alignment_mask)) & ctrl.len))
+				goto unlock;
+
+			/* Check that the access type matches. */
+			if (debug_exception_updates_fsr()) {
+				access = (fsr & ARM_FSR_ACCESS_MASK) ?
+					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+				if (!(access & hw_breakpoint_type(wp)))
+					goto unlock;
+			}
+
+			/* We have a winner. */
+			info->trigger = addr;
+		}
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+		/* Disable all regs except the step breakpoint. */
+		hw_breakpoint_do_action(HW_BREAKPOINT_DISABLE_REGS);
+
+		if (breakpoint_step_flag == 0) {
+			int ret = 0;
+			u32 step_addr = 0;
+
+			wp->attr.step_addr = 0;
+			step_addr = instruction_pointer(regs) + HW_BREAKPOINT_ADDR_STEP_LEN;
+			ret = enable_single_step(wp, step_addr);
+			if (ret == HW_BREAKPOINT_FAIL) {
+				if (read_wb_reg(ARM_BASE_WCR + wp->attr.bp_index) != 0) {
+					write_wb_reg(ARM_BASE_WCR + wp->attr.bp_index, 0);
+					printk("[HW breakpoint] enabling single step failed, watchpoint disabled\n");
+				}
+				printk("[HW breakpoint] failed to set the single-step breakpoint\n");
+			} else
+				wp->attr.step_addr = step_addr;
+			breakpoint_step_flag = 1;
+		}
+#else
+		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+		perf_bp_event(wp, regs);
+#endif
+		/*
+		 * If no overflow handler is present, insert a temporary
+		 * mismatch breakpoint so we can single-step over the
+		 * watchpoint trigger.
+		 */
+		if (!wp->overflow_handler) {
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+			enable_single_step(wp, instruction_pointer(regs) + HW_BREAKPOINT_ADDR_STEP_LEN);
+			breakpoint_step_flag = 1;
+#else
+			enable_single_step(wp, instruction_pointer(regs));
+#endif
+		}
+
+unlock:
+		rcu_read_unlock();
+	}
+}
+
+static void watchpoint_single_step_handler(unsigned long pc)
+{
+	int i;
+	struct perf_event *wp, **slots;
+	struct arch_hw_breakpoint *info;
+
+	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+
+	for (i = 0; i < core_num_wrps; ++i) {
+		rcu_read_lock();
+
+		wp = slots[i];
+
+		if (wp == NULL)
+			goto unlock;
+
+		info = counter_arch_bp(wp);
+		if (!info->step_ctrl.enabled)
+			goto unlock;
+
+		/*
+		 * Restore the original watchpoint if we've completed the
+		 * single-step.
+		 */
+		if (info->trigger != pc)
+			disable_single_step(wp);
+
+unlock:
+		rcu_read_unlock();
+	}
+}
+
+static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
+{
+	int i;
+	u32 ctrl_reg, val, addr;
+	struct perf_event *bp, **slots;
+	struct arch_hw_breakpoint *info;
+	struct arch_hw_breakpoint_ctrl ctrl;
+
+	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+
+	/* The exception entry code places the amended lr in the PC. */
+	addr = regs->ARM_pc;
+
+	/* Check the currently installed breakpoints first. */
+	for (i = 0; i < core_num_brps; ++i) {
+		rcu_read_lock();
+
+		bp = slots[i];
+
+		if (bp == NULL)
+			goto unlock;
+
+		info = counter_arch_bp(bp);
+
+		/* Check if the breakpoint value matches. */
+		val = read_wb_reg(ARM_BASE_BVR + i);
+		if (val != (addr & ~0x3))
+			goto mismatch;
+
+		/* Possible match, check the byte address select to confirm. */
+		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
+		decode_ctrl_reg(ctrl_reg, &ctrl);
+		if ((1 << (addr & 0x3)) & ctrl.len) {
+			info->trigger = addr;
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+			if (breakpoint_step_flag == 1) {
+				if (info->step_ctrl.enabled)
+					disable_single_step(bp);
+
+				if (hw_breakpoint_match_databreak(breakpoint_far_val) == HW_BREAKPOINT_SUCCESS)
+					perf_bp_event(bp, regs);
+
+				breakpoint_step_flag = 0;
+				hw_breakpoint_do_action(HW_BREAKPOINT_ENABLE_REGS);
+			} else {
+				u32 step_addr = 0;
+
+				perf_bp_event(bp, regs);
+				step_addr = instruction_pointer(regs) + HW_BREAKPOINT_ADDR_STEP_LEN;
+				enable_single_step(bp, step_addr);
+				breakpoint_step_flag = 1;
+				hw_breakpoint_do_action(HW_BREAKPOINT_DISABLE_REGS);
+			}
+			if (!bp->overflow_handler)
+				enable_single_step(bp, instruction_pointer(regs) + HW_BREAKPOINT_ADDR_STEP_LEN);
+#else
+			pr_debug("breakpoint fired: address = 0x%x\n", addr);
+			perf_bp_event(bp, regs);
+			if (!bp->overflow_handler)
+				enable_single_step(bp, addr);
+#endif
+			goto unlock;
+		}
+
+mismatch:
+		/* If we're stepping a breakpoint, it can now be restored. */
+		if (info->step_ctrl.enabled)
+			disable_single_step(bp);
+unlock:
+		rcu_read_unlock();
+	}
+
+	/* Handle any pending watchpoint single-step breakpoints. */
+	watchpoint_single_step_handler(addr);
+}
+
+/*
+ * Called from either the Data Abort Handler [watchpoint] or the
+ * Prefetch Abort Handler [breakpoint] with interrupts disabled.
+ */
+static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
+				 struct pt_regs *regs)
+{
+	int ret = 0;
+	u32 dscr;
+
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+	breakpoint_far_val = far_read();
+#endif
+	preempt_disable();
+
+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
+	/* We only handle watchpoints and hardware breakpoints. */
+	ARM_DBG_READ(c1, 0, dscr);
+
+	/* Perform perf callbacks. */
+	switch (ARM_DSCR_MOE(dscr)) {
+	case ARM_ENTRY_BREAKPOINT:
+		breakpoint_handler(addr, regs);
+		break;
+	case ARM_ENTRY_ASYNC_WATCHPOINT:
+		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
+	case ARM_ENTRY_SYNC_WATCHPOINT:
+		watchpoint_handler(addr, fsr, regs);
+		break;
+	default:
+		ret = 1; /* Unhandled fault. */
+	}
+
+	preempt_enable();
+
+	return ret;
+}
+
+/*
+ * One-time initialisation.
+ */
+static cpumask_t debug_err_mask;
+
+static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
+{
+	int cpu = smp_processor_id();
+
+	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
+		   instr, cpu);
+
+	/* Set the error flag for this CPU and skip the faulting instruction. */
+	cpumask_set_cpu(cpu, &debug_err_mask);
+	instruction_pointer(regs) += 4;
+	return 0;
+}
+
+static struct undef_hook debug_reg_hook = {
+	.instr_mask	= 0x0fe80f10,
+	.instr_val	= 0x0e000e10,
+	.fn		= debug_reg_trap,
+};
+
+static void reset_ctrl_regs(void *unused)
+{
+	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
+	u32 dbg_power;
+
+	/*
+	 * v7 debug contains save and restore registers so that debug state
+	 * can be maintained across low-power modes without leaving the debug
+	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+	 * the debug registers out of reset, so we must unlock the OS Lock
+	 * Access Register to avoid taking undefined instruction exceptions
+	 * later on.
+	 */
+	switch (debug_arch) {
+	case ARM_DEBUG_ARCH_V6:
+	case ARM_DEBUG_ARCH_V6_1:
+		/* ARMv6 cores just need to reset the registers. */
+		goto reset_regs;
+	case ARM_DEBUG_ARCH_V7_ECP14:
+		/*
+		 * Ensure sticky power-down is clear (i.e. debug logic is
+		 * powered up).
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 0)
+			err = -EPERM;
+		break;
+	case ARM_DEBUG_ARCH_V7_1:
+		/*
+		 * Ensure the OS double lock is clear.
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 1)
+			err = -EPERM;
+		break;
+	}
+
+	if (err) {
+		pr_warning("CPU %d debug is powered down!\n", cpu);
+		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
+		return;
+	}
+
+	/*
+	 * Unconditionally clear the lock by writing a value
+	 * other than 0xC5ACCE55 to the access register.
+	 */
+	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+	isb();
+
+	/*
+	 * Clear any configured vector-catch events before
+	 * enabling monitor mode.
+	 */
+	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+	isb();
+
+reset_regs:
+	if (enable_monitor_mode())
+		return;
+
+	/* We must also reset any reserved registers. */
+	raw_num_brps = get_num_brp_resources();
+	for (i = 0; i < raw_num_brps; ++i) {
+		write_wb_reg(ARM_BASE_BCR + i, 0UL);
+		write_wb_reg(ARM_BASE_BVR + i, 0UL);
+	}
+
+	for (i = 0; i < core_num_wrps; ++i) {
+		write_wb_reg(ARM_BASE_WCR + i, 0UL);
+		write_wb_reg(ARM_BASE_WVR + i, 0UL);
+	}
+}
+
+static int __cpuinit dbg_reset_notify(struct notifier_block *self,
+				      unsigned long action, void *cpu)
+{
+	if (action == CPU_ONLINE)
+		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata dbg_reset_nb = {
+	.notifier_call = dbg_reset_notify,
+};
+
+static int __init arch_hw_breakpoint_init(void)
+{
+	u32 dscr;
+
+	debug_arch = get_debug_arch();
+
+	if (!debug_arch_supported()) {
+		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
+		return 0;
+	}
+
+	/* Determine how many BRPs/WRPs are available. */
+	core_num_brps = get_num_brps();
+	core_num_wrps = get_num_wrps();
+
+#ifdef CONFIG_HW_BREAKPOINT_MANAGE
+	hw_breakpoint_pool_init();
+#endif
+	/*
+	 * We need to tread carefully here because DBGSWENABLE may be
+	 * driven low on this core and there isn't an architected way to
+	 * determine that.
+	 */
+	register_undef_hook(&debug_reg_hook);
+
+	/*
+	 * Reset the breakpoint resources. We assume that a halting
+	 * debugger will leave the world in a nice state for us.
+	 */
+	on_each_cpu(reset_ctrl_regs, NULL, 1);
+	unregister_undef_hook(&debug_reg_hook);
+	if (!cpumask_empty(&debug_err_mask)) {
+		core_num_brps = 0;
+		core_num_wrps = 0;
+		return 0;
+	}
+
+	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
+		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
+		"", core_num_wrps);
+
+	ARM_DBG_READ(c1, 0, dscr);
+	if (dscr & ARM_DSCR_HDBGEN) {
+		max_watchpoint_len = 4;
+		pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n",
+			   max_watchpoint_len);
+	} else {
+		/* Work out the maximum supported watchpoint length. */
+		max_watchpoint_len = get_max_wp_len();
+		pr_info("maximum watchpoint size is %u bytes.\n",
+				max_watchpoint_len);
+	}
+
+	/* Register debug fault handler. */
+	hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+			TRAP_HWBKPT, "watchpoint debug exception");
+	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+			TRAP_HWBKPT, "breakpoint debug exception");
+
+	/* Register hotplug notifier. */
+	register_cpu_notifier(&dbg_reset_nb);
+	return 0;
+}
+arch_initcall(arch_hw_breakpoint_init);
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+}
+
+/*
+ * Dummy function to register with die_notifier.
+ */
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+					unsigned long val, void *data)
+{
+	return NOTIFY_DONE;
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/init_task.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/init_task.c
new file mode 100644
index 0000000..e7cbb50
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/init_task.c
@@ -0,0 +1,37 @@
+/*
+ *  linux/arch/arm/kernel/init_task.c
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by making sure
+ * the linker maps this in the .text segment right after head.S,
+ * and making head.S ensure the proper alignment.
+ *
+ * The things we do for performance..
+ */
+union thread_union init_thread_union __init_task_data =
+	{ INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/insn.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/insn.c
new file mode 100644
index 0000000..b760340
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/insn.c
@@ -0,0 +1,62 @@
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <asm/opcodes.h>
+
+static unsigned long
+__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
+{
+	unsigned long s, j1, j2, i1, i2, imm10, imm11;
+	unsigned long first, second;
+	long offset;
+
+	offset = (long)addr - (long)(pc + 4);
+	if (offset < -16777216 || offset > 16777214) {
+		WARN_ON_ONCE(1);
+		return 0;
+	}
+
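+	/*
+	 * Split the offset into the S, imm10 and imm11 fields of the
+	 * 32-bit Thumb-2 B/BL encoding; J1 and J2 carry I1/I2 inverted
+	 * and XORed with the sign bit.  The +/-16MB check above matches
+	 * the reach of this encoding.
+	 */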
+	s	= (offset >> 24) & 0x1;
+	i1	= (offset >> 23) & 0x1;
+	i2	= (offset >> 22) & 0x1;
+	imm10	= (offset >> 12) & 0x3ff;
+	imm11	= (offset >>  1) & 0x7ff;
+
+	j1 = (!i1) ^ s;
+	j2 = (!i2) ^ s;
+
+	first = 0xf000 | (s << 10) | imm10;
+	second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
+	if (link)
+		second |= 1 << 14;
+
+	return __opcode_thumb32_compose(first, second);
+}
+
+static unsigned long
+__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
+{
+	unsigned long opcode = 0xea000000;
+	long offset;
+
+	if (link)
+		opcode |= 1 << 24;
+
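+	/*
+	 * B and BL take a signed 24-bit word offset relative to pc + 8,
+	 * giving a branch range of roughly +/-32MB; the check below
+	 * matches that reach.
+	 */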
+	offset = (long)addr - (long)(pc + 8);
+	if (unlikely(offset < -33554432 || offset > 33554428)) {
+		WARN_ON_ONCE(1);
+		return 0;
+	}
+
+	offset = (offset >> 2) & 0x00ffffff;
+
+	return opcode | offset;
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link)
+{
+	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
+		return __arm_gen_branch_thumb2(pc, addr, link);
+	else
+		return __arm_gen_branch_arm(pc, addr, link);
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/insn.h b/ap/os/linux/linux-3.4.x/arch/arm/kernel/insn.h
new file mode 100644
index 0000000..e96065d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/insn.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ARM_INSN_H
+#define __ASM_ARM_INSN_H
+
+static inline unsigned long
+arm_gen_nop(void)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+	return 0xf3af8000; /* nop.w */
+#else
+	return 0xe1a00000; /* mov r0, r0 */
+#endif
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
+
+static inline unsigned long
+arm_gen_branch(unsigned long pc, unsigned long addr)
+{
+	return __arm_gen_branch(pc, addr, false);
+}
+
+static inline unsigned long
+arm_gen_branch_link(unsigned long pc, unsigned long addr)
+{
+	return __arm_gen_branch(pc, addr, true);
+}
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/io.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/io.c
new file mode 100644
index 0000000..dcd5b4d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/io.c
@@ -0,0 +1,50 @@
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ * This needs to be optimized.
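+ * Byte-wide accesses keep it safe for any alignment and for devices
+ * that only tolerate 8-bit transfers.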
+ */
+void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+	unsigned char *t = to;
+	while (count) {
+		count--;
+		*t = readb(from);
+		t++;
+		from++;
+	}
+}
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ * This needs to be optimized.
+ */
+void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+	const unsigned char *f = from;
+	while (count) {
+		count--;
+		writeb(*f, to);
+		f++;
+		to++;
+	}
+}
+
+/*
+ * "memset" on IO memory space.
+ * This needs to be optimized.
+ */
+void _memset_io(volatile void __iomem *dst, int c, size_t count)
+{
+	while (count) {
+		count--;
+		writeb(c, dst);
+		dst++;
+	}
+}
+
+EXPORT_SYMBOL(_memcpy_fromio);
+EXPORT_SYMBOL(_memcpy_toio);
+EXPORT_SYMBOL(_memset_io);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/irq.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/irq.c
new file mode 100644
index 0000000..6d05f06
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/irq.c
@@ -0,0 +1,198 @@
+/*
+ *  linux/arch/arm/kernel/irq.c
+ *
+ *  Copyright (C) 1992 Linus Torvalds
+ *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
+ *
+ *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
+ *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
+ *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This file contains the code used by various IRQ handling routines:
+ *  asking for different IRQ's should be done through these routines
+ *  instead of just grabbing them. Thus setups with different IRQ numbers
+ *  shouldn't result in any weird surprises, and installing new handlers
+ *  should be easier.
+ *
+ *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
+ *  Naturally it's not a 1:1 relation, but there are similarities.
+ */
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/kallsyms.h>
+#include <linux/proc_fs.h>
+
+#include <asm/exception.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/time.h>
+
+/*
+ * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
+ */
+#ifndef irq_finish
+#define irq_finish(irq) do { } while (0)
+#endif
+
+unsigned long irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+#ifdef CONFIG_FIQ
+	show_fiq_list(p, prec);
+#endif
+#ifdef CONFIG_SMP
+	show_ipi_list(p, prec);
+#endif
+	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+	return 0;
+}
+
+/*
+ * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
+ * not come via this function.  Instead, they should provide their
+ * own 'handler'.  Used by platform code implementing C-based 1st
+ * level decoding.
+ */
+void handle_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	zxic_trace_irq_enter(irq);
+
+	/*
+	 * Some hardware gives randomly wrong interrupts.  Rather
+	 * than crashing, do something sensible.
+	 */
+	if (unlikely(irq >= nr_irqs)) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING "Bad IRQ%u\n", irq);
+		ack_bad_irq(irq);
+	} else {
+		generic_handle_irq(irq);
+	}
+
+	/* AT91 specific workaround */
+	irq_finish(irq);
+
+	zxic_trace_irq_exit(irq);
+	irq_exit();
+	set_irq_regs(old_regs);
+}
+
+/*
+ * asm_do_IRQ is the interface to be used from assembly code.
+ */
+asmlinkage void __exception_irq_entry
+asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	handle_IRQ(irq, regs);
+}
+
+void set_irq_flags(unsigned int irq, unsigned int iflags)
+{
+	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+
+	if (irq >= nr_irqs) {
+		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
+		return;
+	}
+
+	if (iflags & IRQF_VALID)
+		clr |= IRQ_NOREQUEST;
+	if (iflags & IRQF_PROBE)
+		clr |= IRQ_NOPROBE;
+	if (!(iflags & IRQF_NOAUTOEN))
+		clr |= IRQ_NOAUTOEN;
+	/* Order is clear bits in "clr" then set bits in "set" */
+	irq_modify_status(irq, clr, set & ~clr);
+}
+
+void __init init_IRQ(void)
+{
+	machine_desc->init_irq();
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+int __init arch_probe_nr_irqs(void)
+{
+	nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
+	return nr_irqs;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = d->affinity;
+	struct irq_chip *c;
+	bool ret = false;
+
+	/*
+	 * If this is a per-CPU interrupt, or the affinity does not
+	 * include this CPU, then we have nothing to do.
+	 */
+	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+		return false;
+
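+	/*
+	 * If no CPU left in the affinity mask is online, fall back to
+	 * all online CPUs and report the affinity as broken.
+	 */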
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
+		ret = true;
+	}
+
+	c = irq_data_get_irq_chip(d);
+	if (!c->irq_set_affinity)
+		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
+
+	return ret;
+}
+
+/*
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for_each_irq_desc(i, desc) {
+		bool affinity_broken;
+
+		raw_spin_lock(&desc->lock);
+		affinity_broken = migrate_one_irq(desc);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken && printk_ratelimit())
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i,
+				smp_processor_id());
+	}
+
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/isa.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/isa.c
new file mode 100644
index 0000000..3464859
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/isa.c
@@ -0,0 +1,70 @@
+/*
+ *  linux/arch/arm/kernel/isa.c
+ *
+ *  Copyright (C) 1999 Phil Blundell
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *  ISA shared memory and I/O port support.  This is required to
+ *  support iopl, inb, outb and friends in userspace via glibc
+ *  emulation.
+ */
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+static unsigned int isa_membase, isa_portbase, isa_portshift;
+
+static ctl_table ctl_isa_vars[4] = {
+	{
+		.procname	= "membase",
+		.data		= &isa_membase,
+		.maxlen		= sizeof(isa_membase),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
+	}, {
+		.procname	= "portbase",
+		.data		= &isa_portbase,
+		.maxlen		= sizeof(isa_portbase),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
+	}, {
+		.procname	= "portshift",
+		.data		= &isa_portshift,
+		.maxlen		= sizeof(isa_portshift),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
+	}, {}
+};
+
+static struct ctl_table_header *isa_sysctl_header;
+
+static ctl_table ctl_isa[2] = {
+	{
+		.procname	= "isa",
+		.mode		= 0555,
+		.child		= ctl_isa_vars,
+	}, {}
+};
+
+static ctl_table ctl_bus[2] = {
+	{
+		.procname	= "bus",
+		.mode		= 0555,
+		.child		= ctl_isa,
+	}, {}
+};
+
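+/*
+ * Publish the ISA window parameters as read-only sysctls under
+ * /proc/sys/bus/isa/{membase,portbase,portshift}.
+ */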
+void __init
+register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift)
+{
+	isa_membase = membase;
+	isa_portbase = portbase;
+	isa_portshift = portshift;
+	isa_sysctl_header = register_sysctl_table(ctl_bus);
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/iwmmxt.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/iwmmxt.S
new file mode 100644
index 0000000..a087838
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/iwmmxt.S
@@ -0,0 +1,346 @@
+/*
+ *  linux/arch/arm/kernel/iwmmxt.S
+ *
+ *  XScale iWMMXt (Concan) context switching and handling
+ *
+ *  Initial code:
+ *  Copyright (c) 2003, Intel Corporation
+ *
+ *  Full lazy switching support, optimizations and more, by Nicolas Pitre
+ *  Copyright (c) 2003-2004, MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+#if defined(CONFIG_CPU_PJ4)
+#define PJ4(code...)		code
+#define XSC(code...)
+#else
+#define PJ4(code...)
+#define XSC(code...)		code
+#endif
+
+#define MMX_WR0		 	(0x00)
+#define MMX_WR1		 	(0x08)
+#define MMX_WR2		 	(0x10)
+#define MMX_WR3			(0x18)
+#define MMX_WR4		 	(0x20)
+#define MMX_WR5		 	(0x28)
+#define MMX_WR6		 	(0x30)
+#define MMX_WR7		 	(0x38)
+#define MMX_WR8		 	(0x40)
+#define MMX_WR9		 	(0x48)
+#define MMX_WR10		(0x50)
+#define MMX_WR11		(0x58)
+#define MMX_WR12		(0x60)
+#define MMX_WR13		(0x68)
+#define MMX_WR14		(0x70)
+#define MMX_WR15		(0x78)
+#define MMX_WCSSF		(0x80)
+#define MMX_WCASF		(0x84)
+#define MMX_WCGR0		(0x88)
+#define MMX_WCGR1		(0x8C)
+#define MMX_WCGR2		(0x90)
+#define MMX_WCGR3		(0x94)
+
+#define MMX_SIZE		(0x98)
+
+	.text
+
+/*
+ * Lazy switching of Concan coprocessor context
+ *
+ * r10 = struct thread_info pointer
+ * r9  = ret_from_exception
+ * lr  = undefined instr exit
+ *
+ * called from prefetch exception handler with interrupts disabled
+ */
+
+ENTRY(iwmmxt_task_enable)
+
+	XSC(mrc	p15, 0, r2, c15, c1, 0)
+	PJ4(mrc p15, 0, r2, c1, c0, 2)
+	@ CP0 and CP1 accessible?
+	XSC(tst	r2, #0x3)
+	PJ4(tst	r2, #0xf)
+	movne	pc, lr				@ if so no business here
+	@ enable access to CP0 and CP1
+	XSC(orr	r2, r2, #0x3)
+	XSC(mcr	p15, 0, r2, c15, c1, 0)
+	PJ4(orr	r2, r2, #0xf)
+	PJ4(mcr	p15, 0, r2, c1, c0, 2)
+
+	ldr	r3, =concan_owner
+	add	r0, r10, #TI_IWMMXT_STATE	@ get task Concan save area
+	ldr	r2, [sp, #60]			@ current task pc value
+	ldr	r1, [r3]			@ get current Concan owner
+	str	r0, [r3]			@ this task now owns Concan regs
+	sub	r2, r2, #4			@ adjust pc back
+	str	r2, [sp, #60]
+
+	mrc	p15, 0, r2, c2, c0, 0
+	mov	r2, r2				@ cpwait
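+	@ (the CP15 read plus the dependent mov act as a cpwait, draining
+	@ the pipeline so the new coprocessor access rights take effect)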
+
+	teq	r1, #0				@ test for last ownership
+	mov	lr, r9				@ normal exit from exception
+	beq	concan_load			@ no owner, skip save
+
+concan_save:
+
+	tmrc	r2, wCon
+
+	@ CUP? wCx
+	tst	r2, #0x1
+	beq 	1f
+
+concan_dump:
+
+	wstrw	wCSSF, [r1, #MMX_WCSSF]
+	wstrw	wCASF, [r1, #MMX_WCASF]
+	wstrw	wCGR0, [r1, #MMX_WCGR0]
+	wstrw	wCGR1, [r1, #MMX_WCGR1]
+	wstrw	wCGR2, [r1, #MMX_WCGR2]
+	wstrw	wCGR3, [r1, #MMX_WCGR3]
+
+1:	@ MUP? wRn
+	tst	r2, #0x2
+	beq	2f
+
+	wstrd	wR0,  [r1, #MMX_WR0]
+	wstrd	wR1,  [r1, #MMX_WR1]
+	wstrd	wR2,  [r1, #MMX_WR2]
+	wstrd	wR3,  [r1, #MMX_WR3]
+	wstrd	wR4,  [r1, #MMX_WR4]
+	wstrd	wR5,  [r1, #MMX_WR5]
+	wstrd	wR6,  [r1, #MMX_WR6]
+	wstrd	wR7,  [r1, #MMX_WR7]
+	wstrd	wR8,  [r1, #MMX_WR8]
+	wstrd	wR9,  [r1, #MMX_WR9]
+	wstrd	wR10, [r1, #MMX_WR10]
+	wstrd	wR11, [r1, #MMX_WR11]
+	wstrd	wR12, [r1, #MMX_WR12]
+	wstrd	wR13, [r1, #MMX_WR13]
+	wstrd	wR14, [r1, #MMX_WR14]
+	wstrd	wR15, [r1, #MMX_WR15]
+
+2:	teq	r0, #0				@ anything to load?
+	moveq	pc, lr
+
+concan_load:
+
+	@ Load wRn
+	wldrd	wR0,  [r0, #MMX_WR0]
+	wldrd	wR1,  [r0, #MMX_WR1]
+	wldrd	wR2,  [r0, #MMX_WR2]
+	wldrd	wR3,  [r0, #MMX_WR3]
+	wldrd	wR4,  [r0, #MMX_WR4]
+	wldrd	wR5,  [r0, #MMX_WR5]
+	wldrd	wR6,  [r0, #MMX_WR6]
+	wldrd	wR7,  [r0, #MMX_WR7]
+	wldrd	wR8,  [r0, #MMX_WR8]
+	wldrd	wR9,  [r0, #MMX_WR9]
+	wldrd	wR10, [r0, #MMX_WR10]
+	wldrd	wR11, [r0, #MMX_WR11]
+	wldrd	wR12, [r0, #MMX_WR12]
+	wldrd	wR13, [r0, #MMX_WR13]
+	wldrd	wR14, [r0, #MMX_WR14]
+	wldrd	wR15, [r0, #MMX_WR15]
+
+	@ Load wCx
+	wldrw	wCSSF, [r0, #MMX_WCSSF]
+	wldrw	wCASF, [r0, #MMX_WCASF]
+	wldrw	wCGR0, [r0, #MMX_WCGR0]
+	wldrw	wCGR1, [r0, #MMX_WCGR1]
+	wldrw	wCGR2, [r0, #MMX_WCGR2]
+	wldrw	wCGR3, [r0, #MMX_WCGR3]
+
+	@ clear CUP/MUP (only if r1 != 0)
+	teq	r1, #0
+	mov 	r2, #0
+	moveq	pc, lr
+	tmcr	wCon, r2
+	mov	pc, lr
+
+/*
+ * Back up Concan regs to save area and disable access to them
+ * (mainly for gdb or sleep mode usage)
+ *
+ * r0 = struct thread_info pointer of target task or NULL for any
+ */
+
+ENTRY(iwmmxt_task_disable)
+
+	stmfd	sp!, {r4, lr}
+
+	mrs	ip, cpsr
+	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
+	msr	cpsr_c, r2
+
+	ldr	r3, =concan_owner
+	add	r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
+	ldr	r1, [r3]			@ get current Concan owner
+	teq	r1, #0				@ any current owner?
+	beq	1f				@ no: quit
+	teq	r0, #0				@ any owner?
+	teqne	r1, r2				@ or specified one?
+	bne	1f				@ no: quit
+
+	@ enable access to CP0 and CP1
+	XSC(mrc	p15, 0, r4, c15, c1, 0)
+	XSC(orr	r4, r4, #0x3)
+	XSC(mcr	p15, 0, r4, c15, c1, 0)
+	PJ4(mrc p15, 0, r4, c1, c0, 2)
+	PJ4(orr	r4, r4, #0xf)
+	PJ4(mcr	p15, 0, r4, c1, c0, 2)
+
+	mov	r0, #0				@ nothing to load
+	str	r0, [r3]			@ no more current owner
+	mrc	p15, 0, r2, c2, c0, 0
+	mov	r2, r2				@ cpwait
+	bl	concan_save
+
+	@ disable access to CP0 and CP1
+	XSC(bic	r4, r4, #0x3)
+	XSC(mcr	p15, 0, r4, c15, c1, 0)
+	PJ4(bic	r4, r4, #0xf)
+	PJ4(mcr	p15, 0, r4, c1, c0, 2)
+
+	mrc	p15, 0, r2, c2, c0, 0
+	mov	r2, r2				@ cpwait
+
+1:	msr	cpsr_c, ip			@ restore interrupt mode
+	ldmfd	sp!, {r4, pc}
+
+/*
+ * Copy Concan state to given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to store Concan state
+ *
+ * this is called mainly in the creation of signal stack frames
+ */
+
+ENTRY(iwmmxt_task_copy)
+
+	mrs	ip, cpsr
+	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
+	msr	cpsr_c, r2
+
+	ldr	r3, =concan_owner
+	add	r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
+	ldr	r3, [r3]			@ get current Concan owner
+	teq	r2, r3				@ does this task own it...
+	beq	1f
+
+	@ current Concan values are in the task save area
+	msr	cpsr_c, ip			@ restore interrupt mode
+	mov	r0, r1
+	mov	r1, r2
+	mov	r2, #MMX_SIZE
+	b	memcpy
+
+1:	@ this task owns Concan regs -- grab a copy from there
+	mov	r0, #0				@ nothing to load
+	mov	r2, #3				@ save all regs
+	mov	r3, lr				@ preserve return address
+	bl	concan_dump
+	msr	cpsr_c, ip			@ restore interrupt mode
+	mov	pc, r3
+
+/*
+ * Restore Concan state from given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to get Concan state from
+ *
+ * this is used to restore Concan state when unwinding a signal stack frame
+ */
+
+ENTRY(iwmmxt_task_restore)
+
+	mrs	ip, cpsr
+	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
+	msr	cpsr_c, r2
+
+	ldr	r3, =concan_owner
+	add	r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
+	ldr	r3, [r3]			@ get current Concan owner
+	bic	r2, r2, #0x7			@ 64-bit alignment
+	teq	r2, r3				@ does this task own it...
+	beq	1f
+
+	@ this task doesn't own Concan regs -- use its save area
+	msr	cpsr_c, ip			@ restore interrupt mode
+	mov	r0, r2
+	mov	r2, #MMX_SIZE
+	b	memcpy
+
+1:	@ this task owns Concan regs -- load them directly
+	mov	r0, r1
+	mov	r1, #0				@ don't clear CUP/MUP
+	mov	r3, lr				@ preserve return address
+	bl	concan_load
+	msr	cpsr_c, ip			@ restore interrupt mode
+	mov	pc, r3
+
+/*
+ * Concan handling on task switch
+ *
+ * r0 = next thread_info pointer
+ *
+ * Called only from the iwmmxt notifier with task preemption disabled.
+ */
+ENTRY(iwmmxt_task_switch)
+
+	XSC(mrc	p15, 0, r1, c15, c1, 0)
+	PJ4(mrc	p15, 0, r1, c1, c0, 2)
+	@ CP0 and CP1 accessible?
+	XSC(tst	r1, #0x3)
+	PJ4(tst	r1, #0xf)
+	bne	1f				@ yes: block them for next task
+
+	ldr	r2, =concan_owner
+	add	r3, r0, #TI_IWMMXT_STATE	@ get next task Concan save area
+	ldr	r2, [r2]			@ get current Concan owner
+	teq	r2, r3				@ next task owns it?
+	movne	pc, lr				@ no: leave Concan disabled
+
+1:	@ flip Concan access
+	XSC(eor	r1, r1, #0x3)
+	XSC(mcr	p15, 0, r1, c15, c1, 0)
+	PJ4(eor r1, r1, #0xf)
+	PJ4(mcr	p15, 0, r1, c1, c0, 2)
+
+	mrc	p15, 0, r1, c2, c0, 0
+	sub	pc, lr, r1, lsr #32		@ cpwait and return
+
+/*
+ * Remove Concan ownership of given task
+ *
+ * r0 = struct thread_info pointer
+ */
+ENTRY(iwmmxt_task_release)
+
+	mrs	r2, cpsr
+	orr	ip, r2, #PSR_I_BIT		@ disable interrupts
+	msr	cpsr_c, ip
+	ldr	r3, =concan_owner
+	add	r0, r0, #TI_IWMMXT_STATE	@ get task Concan save area
+	ldr	r1, [r3]			@ get current Concan owner
+	eors	r0, r0, r1			@ if equal...
+	streq	r0, [r3]			@ then clear ownership
+	msr	cpsr_c, r2			@ restore interrupts
+	mov	pc, lr
+
+	.data
+concan_owner:
+	.word	0
+
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/jump_label.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/jump_label.c
new file mode 100644
index 0000000..4ce4f78
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/jump_label.c
@@ -0,0 +1,39 @@
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+
+#include "insn.h"
+#include "patch.h"
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __arch_jump_label_transform(struct jump_entry *entry,
+					enum jump_label_type type,
+					bool is_static)
+{
+	void *addr = (void *)entry->code;
+	unsigned int insn;
+
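+	/*
+	 * Enabling the label patches the site to a branch to the jump
+	 * target; disabling patches it back to a nop.  The "static"
+	 * variant writes the text directly (used before other CPUs can
+	 * be executing it), while patch_text() synchronises with other
+	 * CPUs before changing live code.
+	 */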
+	if (type == JUMP_LABEL_ENABLE)
+		insn = arm_gen_branch(entry->code, entry->target);
+	else
+		insn = arm_gen_nop();
+
+	if (is_static)
+		__patch_text(addr, insn);
+	else
+		patch_text(addr, insn);
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+			       enum jump_label_type type)
+{
+	__arch_jump_label_transform(entry, type, false);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+				      enum jump_label_type type)
+{
+	__arch_jump_label_transform(entry, type, true);
+}
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kgdb.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kgdb.c
new file mode 100644
index 0000000..778c2f7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kgdb.c
@@ -0,0 +1,255 @@
+/*
+ * arch/arm/kernel/kgdb.c
+ *
+ * ARM KGDB support
+ *
+ * Copyright (c) 2002-2004 MontaVista Software, Inc
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors:  George Davis <davis_g@mvista.com>
+ *           Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+#include <asm/traps.h>
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
+{
+	{ "r0", 4, offsetof(struct pt_regs, ARM_r0)},
+	{ "r1", 4, offsetof(struct pt_regs, ARM_r1)},
+	{ "r2", 4, offsetof(struct pt_regs, ARM_r2)},
+	{ "r3", 4, offsetof(struct pt_regs, ARM_r3)},
+	{ "r4", 4, offsetof(struct pt_regs, ARM_r4)},
+	{ "r5", 4, offsetof(struct pt_regs, ARM_r5)},
+	{ "r6", 4, offsetof(struct pt_regs, ARM_r6)},
+	{ "r7", 4, offsetof(struct pt_regs, ARM_r7)},
+	{ "r8", 4, offsetof(struct pt_regs, ARM_r8)},
+	{ "r9", 4, offsetof(struct pt_regs, ARM_r9)},
+	{ "r10", 4, offsetof(struct pt_regs, ARM_r10)},
+	{ "fp", 4, offsetof(struct pt_regs, ARM_fp)},
+	{ "ip", 4, offsetof(struct pt_regs, ARM_ip)},
+	{ "sp", 4, offsetof(struct pt_regs, ARM_sp)},
+	{ "lr", 4, offsetof(struct pt_regs, ARM_lr)},
+	{ "pc", 4, offsetof(struct pt_regs, ARM_pc)},
+	{ "f0", 12, -1 },
+	{ "f1", 12, -1 },
+	{ "f2", 12, -1 },
+	{ "f3", 12, -1 },
+	{ "f4", 12, -1 },
+	{ "f5", 12, -1 },
+	{ "f6", 12, -1 },
+	{ "f7", 12, -1 },
+	{ "fps", 4, -1 },
+	{ "cpsr", 4, offsetof(struct pt_regs, ARM_cpsr)},
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+	if (regno >= DBG_MAX_REG_NUM || regno < 0)
+		return NULL;
+
+	if (dbg_reg_def[regno].offset != -1)
+		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+		       dbg_reg_def[regno].size);
+	else
+		memset(mem, 0, dbg_reg_def[regno].size);
+	return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+	if (regno >= DBG_MAX_REG_NUM || regno < 0)
+		return -EINVAL;
+
+	if (dbg_reg_def[regno].offset != -1)
+		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+		       dbg_reg_def[regno].size);
+	return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+	struct pt_regs *thread_regs;
+	int regno;
+
+	/* Just making sure... */
+	if (task == NULL)
+		return;
+
+	/* Initialize to zero */
+	for (regno = 0; regno < GDB_MAX_REGS; regno++)
+		gdb_regs[regno] = 0;
+
+	/* Otherwise, we have only some registers from switch_to() */
+	thread_regs		= task_pt_regs(task);
+	gdb_regs[_R0]		= thread_regs->ARM_r0;
+	gdb_regs[_R1]		= thread_regs->ARM_r1;
+	gdb_regs[_R2]		= thread_regs->ARM_r2;
+	gdb_regs[_R3]		= thread_regs->ARM_r3;
+	gdb_regs[_R4]		= thread_regs->ARM_r4;
+	gdb_regs[_R5]		= thread_regs->ARM_r5;
+	gdb_regs[_R6]		= thread_regs->ARM_r6;
+	gdb_regs[_R7]		= thread_regs->ARM_r7;
+	gdb_regs[_R8]		= thread_regs->ARM_r8;
+	gdb_regs[_R9]		= thread_regs->ARM_r9;
+	gdb_regs[_R10]		= thread_regs->ARM_r10;
+	gdb_regs[_FP]		= thread_regs->ARM_fp;
+	gdb_regs[_IP]		= thread_regs->ARM_ip;
+	gdb_regs[_SPT]		= thread_regs->ARM_sp;
+	gdb_regs[_LR]		= thread_regs->ARM_lr;
+	gdb_regs[_PC]		= thread_regs->ARM_pc;
+	gdb_regs[_CPSR]		= thread_regs->ARM_cpsr;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+	regs->ARM_pc = pc;
+}
+
+static int compiled_break;
+
+int kgdb_arch_handle_exception(int exception_vector, int signo,
+			       int err_code, char *remcom_in_buffer,
+			       char *remcom_out_buffer,
+			       struct pt_regs *linux_regs)
+{
+	unsigned long addr;
+	char *ptr;
+
+	switch (remcom_in_buffer[0]) {
+	case 'D':
+	case 'k':
+	case 'c':
+		/*
+		 * Try to read an optional parameter; the pc is left
+		 * unchanged if none is given.  If this was a compiled
+		 * breakpoint, we need to move on to the next instruction
+		 * or we will just hit the breakpoint over and over again.
+		 */
+		ptr = &remcom_in_buffer[1];
+		if (kgdb_hex2long(&ptr, &addr))
+			linux_regs->ARM_pc = addr;
+		else if (compiled_break == 1)
+			linux_regs->ARM_pc += 4;
+
+		compiled_break = 0;
+
+		return 0;
+	}
+
+	return -1;
+}
+
+static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
+{
+	kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+	return 0;
+}
+
+static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
+{
+	compiled_break = 1;
+	kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+	return 0;
+}
+
+static struct undef_hook kgdb_brkpt_hook = {
+	.instr_mask		= 0xffffffff,
+	.instr_val		= KGDB_BREAKINST,
+	.fn			= kgdb_brk_fn
+};
+
+static struct undef_hook kgdb_compiled_brkpt_hook = {
+	.instr_mask		= 0xffffffff,
+	.instr_val		= KGDB_COMPILED_BREAK,
+	.fn			= kgdb_compiled_brk_fn
+};
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+       kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
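+       /* smp_call_function() must not be called with interrupts disabled. */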
+       local_irq_enable();
+       smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+       local_irq_disable();
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+	struct pt_regs *regs = args->regs;
+
+	if (kgdb_handle_exception(1, args->signr, cmd, regs))
+		return NOTIFY_DONE;
+	return NOTIFY_STOP;
+}
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+	unsigned long flags;
+	int ret;
+
+	local_irq_save(flags);
+	ret = __kgdb_notify(ptr, cmd);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+	.notifier_call	= kgdb_notify,
+	.priority	= -INT_MAX,
+};
+
+
+/**
+ *	kgdb_arch_init - Perform any architecture specific initialization.
+ *
+ *	This function will handle the initialization of any architecture
+ *	specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+	int ret = register_die_notifier(&kgdb_notifier);
+
+	if (ret != 0)
+		return ret;
+
+	register_undef_hook(&kgdb_brkpt_hook);
+	register_undef_hook(&kgdb_compiled_brkpt_hook);
+
+	return 0;
+}
+
+/**
+ *	kgdb_arch_exit - Perform any architecture specific uninitialization.
+ *
+ *	This function will handle the uninitialization of any architecture
+ *	specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+	unregister_undef_hook(&kgdb_brkpt_hook);
+	unregister_undef_hook(&kgdb_compiled_brkpt_hook);
+	unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * Register our undef instruction hooks with the ARM undef core.
+ * We register a hook specifically looking for the KGDB break
+ * instruction, and we handle the normal undef case within the
+ * do_undefinstr handler.
+ */
+struct kgdb_arch arch_kgdb_ops = {
+#ifndef __ARMEB__
+	.gdb_bpt_instr		= {0xfe, 0xde, 0xff, 0xe7}
+#else /* ! __ARMEB__ */
+	.gdb_bpt_instr		= {0xe7, 0xff, 0xde, 0xfe}
+#endif
+};
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-arm.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-arm.c
new file mode 100644
index 0000000..8a30c89
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-arm.c
@@ -0,0 +1,1005 @@
+/*
+ * arch/arm/kernel/kprobes-arm.c
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * We do not have hardware single-stepping on ARM.  This
+ * effort is further complicated by the ARM not having a
+ * "next PC" register.  Instructions that change the PC
+ * can't be safely single-stepped in an MP environment, so
+ * we have a lot of work to do:
+ *
+ * In the prepare phase:
+ *   *) If it is an instruction that does anything
+ *      with the CPU mode, we reject it for a kprobe.
+ *      (This is out of laziness rather than need.  The
+ *      instructions could be simulated.)
+ *
+ *   *) Otherwise, decode the instruction rewriting its
+ *      registers to take fixed, ordered registers and
+ *      setting a handler for it to run the instruction.
+ *
+ * In the execution phase by an instruction's handler:
+ *
+ *   *) If the PC is written to by the instruction, the
+ *      instruction must be fully simulated in software.
+ *
+ *   *) Otherwise, a modified form of the instruction is
+ *      directly executed.  Its handler calls the
+ *      instruction in insn[0].  In insn[1] is a
+ *      "mov pc, lr" to return.
+ *
+ *      Before calling, load up the reordered registers
+ *      from the original instruction's registers.  If one
+ *      of the original input registers is the PC, compute
+ *      and adjust the appropriate input register.
+ *
+ *      After the call completes, copy the output registers to
+ *      the original instruction's original registers.
+ *
+ * We don't use a real breakpoint instruction, since that
+ * would take the kernel from SVC mode back into SVC
+ * mode, losing the link register.  Instead we use an
+ * undefined instruction.  To simplify processing, the
+ * undefined instruction used for kprobes must be reserved
+ * exclusively for kprobes use.
+ *
+ * TODO: ifdef out some instruction decoding based on architecture.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+
+#include "kprobes.h"
+
+#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
+
+#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
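+/* e.g. an imm24 field of 0xfffffe decodes to a displacement of -8 bytes */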
+
+#if  __LINUX_ARM_ARCH__ >= 6
+#define BLX(reg)	"blx	"reg"		\n\t"
+#else
+#define BLX(reg)	"mov	lr, pc		\n\t"	\
+			"mov	pc, "reg"	\n\t"
+#endif
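+
+/*
+ * On pre-v6 cores BLX is synthesised with mov lr, pc / mov pc, reg:
+ * reading the pc yields the current instruction + 8, so lr ends up
+ * holding the address of the instruction after the jump.
+ */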
+
+/*
+ * To avoid the complications of mimicking single-stepping on a
+ * processor without a Next-PC or a single-step mode, and to
+ * avoid having to deal with the side-effects of boosting, we
+ * simulate or emulate (almost) all ARM instructions.
+ *
+ * "Simulation" is where the instruction's behavior is duplicated in
+ * C code.  "Emulation" is where the original instruction is rewritten
+ * and executed, often by altering its registers.
+ *
+ * By having all behavior of the kprobe'd instruction completed before
+ * returning from the kprobe_handler(), all locks (scheduler and
+ * interrupt) can safely be released.  There is no need for secondary
+ * breakpoints, no race with MP or preemptible kernels, nor having to
+ * clean up resource counts at a later time, impacting overall system
+ * performance.  By rewriting the instruction, only the minimum registers
+ * need to be loaded and saved back, optimizing performance.
+ *
+ * Calling the insnslot_*_rwflags version of a function doesn't hurt
+ * anything even when the CPSR flags aren't updated by the
+ * instruction.  It's just a little slower in return for saving
+ * a little space by not having a duplicate function that doesn't
+ * update the flags.  (The same optimization can be said for
+ * instructions that do or don't perform register writeback)
+ * Also, instructions can either read the flags, only write the
+ * flags, or read and write the flags.  To save combinations
+ * rather than for sheer performance, flag functions just assume
+ * read and write of flags.
+ */
+
+static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	long iaddr = (long)p->addr;
+	int disp  = branch_displacement(insn);
+
+	if (insn & (1 << 24))
+		regs->ARM_lr = iaddr + 4;
+
+	regs->ARM_pc = iaddr + 8 + disp;
+}
+
+static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	long iaddr = (long)p->addr;
+	int disp = branch_displacement(insn);
+
+	regs->ARM_lr = iaddr + 4;
+	regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
+	regs->ARM_cpsr |= PSR_T_BIT;
+}
+
+static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rm = insn & 0xf;
+	long rmv = regs->uregs[rm];
+
+	if (insn & (1 << 5))
+		regs->ARM_lr = (long)p->addr + 4;
+
+	regs->ARM_pc = rmv & ~0x1;
+	regs->ARM_cpsr &= ~PSR_T_BIT;
+	if (rmv & 0x1)
+		regs->ARM_cpsr |= PSR_T_BIT;
+}
+
+static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rd = (insn >> 12) & 0xf;
+	unsigned long mask = 0xf8ff03df; /* Mask out execution state */
+	regs->uregs[rd] = regs->ARM_cpsr & mask;
+}
+
+static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
+{
+	regs->uregs[12] = regs->uregs[13];
+}
+
+static void __kprobes
+emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = (unsigned long)p->addr + 8;
+	int rt = (insn >> 12) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+	int rm = insn & 0xf;
+
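+	/*
+	 * Pin the operands to r0-r3: the copy of the instruction in the
+	 * slot has been rewritten to use these fixed registers.
+	 */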
+	register unsigned long rtv asm("r0") = regs->uregs[rt];
+	register unsigned long rt2v asm("r1") = regs->uregs[rt+1];
+	register unsigned long rnv asm("r2") = (rn == 15) ? pc
+							  : regs->uregs[rn];
+	register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+	__asm__ __volatile__ (
+		BLX("%[fn]")
+		: "=r" (rtv), "=r" (rt2v), "=r" (rnv)
+		: "0" (rtv), "1" (rt2v), "2" (rnv), "r" (rmv),
+		  [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rt] = rtv;
+	regs->uregs[rt+1] = rt2v;
+	if (is_writeback(insn))
+		regs->uregs[rn] = rnv;
+}
+
+static void __kprobes
+emulate_ldr(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = (unsigned long)p->addr + 8;
+	int rt = (insn >> 12) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+	int rm = insn & 0xf;
+
+	register unsigned long rtv asm("r0");
+	register unsigned long rnv asm("r2") = (rn == 15) ? pc
+							  : regs->uregs[rn];
+	register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+	__asm__ __volatile__ (
+		BLX("%[fn]")
+		: "=r" (rtv), "=r" (rnv)
+		: "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	if (rt == 15)
+		load_write_pc(rtv, regs);
+	else
+		regs->uregs[rt] = rtv;
+
+	if (is_writeback(insn))
+		regs->uregs[rn] = rnv;
+}
+
+static void __kprobes
+emulate_str(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long rtpc = (unsigned long)p->addr + str_pc_offset;
+	unsigned long rnpc = (unsigned long)p->addr + 8;
+	int rt = (insn >> 12) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+	int rm = insn & 0xf;
+
+	register unsigned long rtv asm("r0") = (rt == 15) ? rtpc
+							  : regs->uregs[rt];
+	register unsigned long rnv asm("r2") = (rn == 15) ? rnpc
+							  : regs->uregs[rn];
+	register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+	__asm__ __volatile__ (
+		BLX("%[fn]")
+		: "=r" (rnv)
+		: "r" (rtv), "0" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	if (is_writeback(insn))
+		regs->uregs[rn] = rnv;
+}
+
+static void __kprobes
+emulate_rd12rn16rm0rs8_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = (unsigned long)p->addr + 8;
+	int rd = (insn >> 12) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+	int rm = insn & 0xf;
+	int rs = (insn >> 8) & 0xf;
+
+	register unsigned long rdv asm("r0") = regs->uregs[rd];
+	register unsigned long rnv asm("r2") = (rn == 15) ? pc
+							  : regs->uregs[rn];
+	register unsigned long rmv asm("r3") = (rm == 15) ? pc
+							  : regs->uregs[rm];
+	register unsigned long rsv asm("r1") = regs->uregs[rs];
+	unsigned long cpsr = regs->ARM_cpsr;
+
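+	/*
+	 * Execute the slot with the probed context's CPSR flags live,
+	 * then fold the resulting flags back so flag-setting forms
+	 * behave as if run in place.
+	 */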
+	__asm__ __volatile__ (
+		"msr	cpsr_fs, %[cpsr]	\n\t"
+		BLX("%[fn]")
+		"mrs	%[cpsr], cpsr		\n\t"
+		: "=r" (rdv), [cpsr] "=r" (cpsr)
+		: "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
+		  "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	if (rd == 15)
+		alu_write_pc(rdv, regs);
+	else
+		regs->uregs[rd] = rdv;
+	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+emulate_rd12rn16rm0_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rd = (insn >> 12) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+	int rm = insn & 0xf;
+
+	register unsigned long rdv asm("r0") = regs->uregs[rd];
+	register unsigned long rnv asm("r2") = regs->uregs[rn];
+	register unsigned long rmv asm("r3") = regs->uregs[rm];
+	unsigned long cpsr = regs->ARM_cpsr;
+
+	__asm__ __volatile__ (
+		"msr	cpsr_fs, %[cpsr]	\n\t"
+		BLX("%[fn]")
+		"mrs	%[cpsr], cpsr		\n\t"
+		: "=r" (rdv), [cpsr] "=r" (cpsr)
+		: "0" (rdv), "r" (rnv), "r" (rmv),
+		  "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rd] = rdv;
+	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+emulate_rd16rn12rm0rs8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rd = (insn >> 16) & 0xf;
+	int rn = (insn >> 12) & 0xf;
+	int rm = insn & 0xf;
+	int rs = (insn >> 8) & 0xf;
+
+	register unsigned long rdv asm("r2") = regs->uregs[rd];
+	register unsigned long rnv asm("r0") = regs->uregs[rn];
+	register unsigned long rmv asm("r3") = regs->uregs[rm];
+	register unsigned long rsv asm("r1") = regs->uregs[rs];
+	unsigned long cpsr = regs->ARM_cpsr;
+
+	__asm__ __volatile__ (
+		"msr	cpsr_fs, %[cpsr]	\n\t"
+		BLX("%[fn]")
+		"mrs	%[cpsr], cpsr		\n\t"
+		: "=r" (rdv), [cpsr] "=r" (cpsr)
+		: "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
+		  "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rd] = rdv;
+	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+emulate_rd12rm0_noflags_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rd = (insn >> 12) & 0xf;
+	int rm = insn & 0xf;
+
+	register unsigned long rdv asm("r0") = regs->uregs[rd];
+	register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+	__asm__ __volatile__ (
+		BLX("%[fn]")
+		: "=r" (rdv)
+		: "0" (rdv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rd] = rdv;
+}
+
+static void __kprobes
+emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rdlo = (insn >> 12) & 0xf;
+	int rdhi = (insn >> 16) & 0xf;
+	int rn = insn & 0xf;
+	int rm = (insn >> 8) & 0xf;
+
+	register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
+	register unsigned long rdhiv asm("r2") = regs->uregs[rdhi];
+	register unsigned long rnv asm("r3") = regs->uregs[rn];
+	register unsigned long rmv asm("r1") = regs->uregs[rm];
+	unsigned long cpsr = regs->ARM_cpsr;
+
+	__asm__ __volatile__ (
+		"msr	cpsr_fs, %[cpsr]	\n\t"
+		BLX("%[fn]")
+		"mrs	%[cpsr], cpsr		\n\t"
+		: "=r" (rdlov), "=r" (rdhiv), [cpsr] "=r" (cpsr)
+		: "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
+		  "2" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rdlo] = rdlov;
+	regs->uregs[rdhi] = rdhiv;
+	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+/*
+ * For the instruction masking and comparisons in all the decode
+ * tables below, do _not_ rearrange the order of tests unless
+ * you're very, very sure of what you are doing.  For the sake of
+ * efficiency, the masks for some tests sometimes assume other tests
+ * have been done prior to them, so the number of patterns to test
+ * for an instruction set can be as broad as possible to reduce the
+ * number of tests needed.
+ */
+
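+/*
+ * Reading the tables: each entry matches (insn & mask) == value.
+ * DECODE_SIMULATE dispatches to a C simulation of the instruction,
+ * DECODE_EMULATE(X) executes a rewritten copy from the instruction
+ * slot (the X variants also check the REGS() operand constraints),
+ * DECODE_REJECT refuses to probe the instruction, and DECODE_OR
+ * makes a matching instruction use the action of the following entry.
+ */
+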
+static const union decode_item arm_1111_table[] = {
+	/* Unconditional instructions					*/
+
+	/* memory hint		1111 0100 x001 xxxx xxxx xxxx xxxx xxxx */
+	/* PLDI (immediate)	1111 0100 x101 xxxx xxxx xxxx xxxx xxxx */
+	/* PLDW (immediate)	1111 0101 x001 xxxx xxxx xxxx xxxx xxxx */
+	/* PLD (immediate)	1111 0101 x101 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_SIMULATE	(0xfe300000, 0xf4100000, kprobe_simulate_nop),
+
+	/* memory hint		1111 0110 x001 xxxx xxxx xxxx xxx0 xxxx */
+	/* PLDI (register)	1111 0110 x101 xxxx xxxx xxxx xxx0 xxxx */
+	/* PLDW (register)	1111 0111 x001 xxxx xxxx xxxx xxx0 xxxx */
+	/* PLD (register)	1111 0111 x101 xxxx xxxx xxxx xxx0 xxxx */
+	DECODE_SIMULATE	(0xfe300010, 0xf6100000, kprobe_simulate_nop),
+
+	/* BLX (immediate)	1111 101x xxxx xxxx xxxx xxxx xxxx xxxx */
+	DECODE_SIMULATE	(0xfe000000, 0xfa000000, simulate_blx1),
+
+	/* CPS			1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
+	/* SETEND		1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
+	/* SRS			1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
+	/* RFE			1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
+
+	/* Coprocessor instructions... */
+	/* MCRR2		1111 1100 0100 xxxx xxxx xxxx xxxx xxxx */
+	/* MRRC2		1111 1100 0101 xxxx xxxx xxxx xxxx xxxx */
+	/* LDC2			1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
+	/* STC2			1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
+	/* CDP2			1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
+	/* MCR2			1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
+	/* MRC2			1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
+
+	/* Other unallocated instructions...				*/
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_0001_0xx0____0xxx_table[] = {
+	/* Miscellaneous instructions					*/
+
+	/* MRS cpsr		cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
+	DECODE_SIMULATEX(0x0ff000f0, 0x01000000, simulate_mrs,
+						 REGS(0, NOPC, 0, 0, 0)),
+
+	/* BX			cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
+	DECODE_SIMULATE	(0x0ff000f0, 0x01200010, simulate_blx2bx),
+
+	/* BLX (register)	cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
+	DECODE_SIMULATEX(0x0ff000f0, 0x01200030, simulate_blx2bx,
+						 REGS(0, 0, 0, 0, NOPC)),
+
+	/* CLZ			cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
+	DECODE_EMULATEX	(0x0ff000f0, 0x01600010, emulate_rd12rm0_noflags_nopc,
+						 REGS(0, NOPC, 0, 0, NOPC)),
+
+	/* QADD			cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx */
+	/* QSUB			cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx */
+	/* QDADD		cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx */
+	/* QDSUB		cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx */
+	DECODE_EMULATEX	(0x0f9000f0, 0x01000050, emulate_rd12rn16rm0_rwflags_nopc,
+						 REGS(NOPC, NOPC, 0, 0, NOPC)),
+
+	/* BXJ			cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
+	/* MSR			cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
+	/* MRS spsr		cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */
+	/* BKPT			1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
+	/* SMC			cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */
+	/* And unallocated instructions...				*/
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_0001_0xx0____1xx0_table[] = {
+	/* Halfword multiply and multiply-accumulate			*/
+
+	/* SMLALxy		cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
+	DECODE_EMULATEX	(0x0ff00090, 0x01400080, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
+						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+	/* SMULWy		cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
+	DECODE_OR	(0x0ff000b0, 0x012000a0),
+	/* SMULxy		cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
+	DECODE_EMULATEX	(0x0ff00090, 0x01600080, emulate_rd16rn12rm0rs8_rwflags_nopc,
+						 REGS(NOPC, 0, NOPC, 0, NOPC)),
+
+	/* SMLAxy		cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx */
+	DECODE_OR	(0x0ff00090, 0x01000080),
+	/* SMLAWy		cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx */
+	DECODE_EMULATEX	(0x0ff000b0, 0x01200080, emulate_rd16rn12rm0rs8_rwflags_nopc,
+						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_0000_____1001_table[] = {
+	/* Multiply and multiply-accumulate				*/
+
+	/* MUL			cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx */
+	/* MULS			cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx */
+	DECODE_EMULATEX	(0x0fe000f0, 0x00000090, emulate_rd16rn12rm0rs8_rwflags_nopc,
+						 REGS(NOPC, 0, NOPC, 0, NOPC)),
+
+	/* MLA			cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx */
+	/* MLAS			cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx */
+	DECODE_OR	(0x0fe000f0, 0x00200090),
+	/* MLS			cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx */
+	DECODE_EMULATEX	(0x0ff000f0, 0x00600090, emulate_rd16rn12rm0rs8_rwflags_nopc,
+						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+	/* UMAAL		cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx */
+	DECODE_OR	(0x0ff000f0, 0x00400090),
+	/* UMULL		cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx */
+	/* UMULLS		cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx */
+	/* UMLAL		cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx */
+	/* UMLALS		cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx */
+	/* SMULL		cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx */
+	/* SMULLS		cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx */
+	/* SMLAL		cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx */
+	/* SMLALS		cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx */
+	DECODE_EMULATEX	(0x0f8000f0, 0x00800090, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
+						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_0001_____1001_table[] = {
+	/* Synchronization primitives					*/
+
+#if __LINUX_ARM_ARCH__ < 6
+	/* Deprecated on ARMv6 and may be UNDEFINED on v7		*/
+	/* SWP/SWPB		cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
+	DECODE_EMULATEX	(0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
+						 REGS(NOPC, NOPC, 0, 0, NOPC)),
+#endif
+	/* LDREX/STREX{,D,B,H}	cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
+	/* And unallocated instructions...				*/
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_000x_____1xx1_table[] = {
+	/* Extra load/store instructions				*/
+
+	/* STRHT		cccc 0000 xx10 xxxx xxxx xxxx 1011 xxxx */
+	/* ???			cccc 0000 xx10 xxxx xxxx xxxx 11x1 xxxx */
+	/* LDRHT		cccc 0000 xx11 xxxx xxxx xxxx 1011 xxxx */
+	/* LDRSBT		cccc 0000 xx11 xxxx xxxx xxxx 1101 xxxx */
+	/* LDRSHT		cccc 0000 xx11 xxxx xxxx xxxx 1111 xxxx */
+	DECODE_REJECT	(0x0f200090, 0x00200090),
+
+	/* LDRD/STRD lr,pc,{...	cccc 000x x0x0 xxxx 111x xxxx 1101 xxxx */
+	DECODE_REJECT	(0x0e10e0d0, 0x0000e0d0),
+
+	/* LDRD (register)	cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx */
+	/* STRD (register)	cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx */
+	DECODE_EMULATEX	(0x0e5000d0, 0x000000d0, emulate_ldrdstrd,
+						 REGS(NOPCWB, NOPCX, 0, 0, NOPC)),
+
+	/* LDRD (immediate)	cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx */
+	/* STRD (immediate)	cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx */
+	DECODE_EMULATEX	(0x0e5000d0, 0x004000d0, emulate_ldrdstrd,
+						 REGS(NOPCWB, NOPCX, 0, 0, 0)),
+
+	/* STRH (register)	cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx */
+	DECODE_EMULATEX	(0x0e5000f0, 0x000000b0, emulate_str,
+						 REGS(NOPCWB, NOPC, 0, 0, NOPC)),
+
+	/* LDRH (register)	cccc 000x x0x1 xxxx xxxx xxxx 1011 xxxx */
+	/* LDRSB (register)	cccc 000x x0x1 xxxx xxxx xxxx 1101 xxxx */
+	/* LDRSH (register)	cccc 000x x0x1 xxxx xxxx xxxx 1111 xxxx */
+	DECODE_EMULATEX	(0x0e500090, 0x00100090, emulate_ldr,
+						 REGS(NOPCWB, NOPC, 0, 0, NOPC)),
+
+	/* STRH (immediate)	cccc 000x x1x0 xxxx xxxx xxxx 1011 xxxx */
+	DECODE_EMULATEX	(0x0e5000f0, 0x004000b0, emulate_str,
+						 REGS(NOPCWB, NOPC, 0, 0, 0)),
+
+	/* LDRH (immediate)	cccc 000x x1x1 xxxx xxxx xxxx 1011 xxxx */
+	/* LDRSB (immediate)	cccc 000x x1x1 xxxx xxxx xxxx 1101 xxxx */
+	/* LDRSH (immediate)	cccc 000x x1x1 xxxx xxxx xxxx 1111 xxxx */
+	DECODE_EMULATEX	(0x0e500090, 0x00500090, emulate_ldr,
+						 REGS(NOPCWB, NOPC, 0, 0, 0)),
+
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_000x_table[] = {
+	/* Data-processing (register)					*/
+
+	/* <op>S PC, ...	cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */
+	DECODE_REJECT	(0x0e10f000, 0x0010f000),
+
+	/* MOV IP, SP		1110 0001 1010 0000 1100 0000 0000 1101 */
+	DECODE_SIMULATE	(0xffffffff, 0xe1a0c00d, simulate_mov_ipsp),
+
+	/* TST (register)	cccc 0001 0001 xxxx xxxx xxxx xxx0 xxxx */
+	/* TEQ (register)	cccc 0001 0011 xxxx xxxx xxxx xxx0 xxxx */
+	/* CMP (register)	cccc 0001 0101 xxxx xxxx xxxx xxx0 xxxx */
+	/* CMN (register)	cccc 0001 0111 xxxx xxxx xxxx xxx0 xxxx */
+	DECODE_EMULATEX	(0x0f900010, 0x01100000, emulate_rd12rn16rm0rs8_rwflags,
+						 REGS(ANY, 0, 0, 0, ANY)),
+
+	/* MOV (register)	cccc 0001 101x xxxx xxxx xxxx xxx0 xxxx */
+	/* MVN (register)	cccc 0001 111x xxxx xxxx xxxx xxx0 xxxx */
+	DECODE_EMULATEX	(0x0fa00010, 0x01a00000, emulate_rd12rn16rm0rs8_rwflags,
+						 REGS(0, ANY, 0, 0, ANY)),
+
+	/* AND (register)	cccc 0000 000x xxxx xxxx xxxx xxx0 xxxx */
+	/* EOR (register)	cccc 0000 001x xxxx xxxx xxxx xxx0 xxxx */
+	/* SUB (register)	cccc 0000 010x xxxx xxxx xxxx xxx0 xxxx */
+	/* RSB (register)	cccc 0000 011x xxxx xxxx xxxx xxx0 xxxx */
+	/* ADD (register)	cccc 0000 100x xxxx xxxx xxxx xxx0 xxxx */
+	/* ADC (register)	cccc 0000 101x xxxx xxxx xxxx xxx0 xxxx */
+	/* SBC (register)	cccc 0000 110x xxxx xxxx xxxx xxx0 xxxx */
+	/* RSC (register)	cccc 0000 111x xxxx xxxx xxxx xxx0 xxxx */
+	/* ORR (register)	cccc 0001 100x xxxx xxxx xxxx xxx0 xxxx */
+	/* BIC (register)	cccc 0001 110x xxxx xxxx xxxx xxx0 xxxx */
+	DECODE_EMULATEX	(0x0e000010, 0x00000000, emulate_rd12rn16rm0rs8_rwflags,
+						 REGS(ANY, ANY, 0, 0, ANY)),
+
+	/* TST (reg-shift reg)	cccc 0001 0001 xxxx xxxx xxxx 0xx1 xxxx */
+	/* TEQ (reg-shift reg)	cccc 0001 0011 xxxx xxxx xxxx 0xx1 xxxx */
+	/* CMP (reg-shift reg)	cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
+	/* CMN (reg-shift reg)	cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
+	DECODE_EMULATEX	(0x0f900090, 0x01100010, emulate_rd12rn16rm0rs8_rwflags,
+						 REGS(ANY, 0, NOPC, 0, ANY)),
+
+	/* MOV (reg-shift reg)	cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
+	/* MVN (reg-shift reg)	cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
+	DECODE_EMULATEX	(0x0fa00090, 0x01a00010, emulate_rd12rn16rm0rs8_rwflags,
+						 REGS(0, ANY, NOPC, 0, ANY)),
+
+	/* AND (reg-shift reg)	cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
+	/* EOR (reg-shift reg)	cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
+	/* SUB (reg-shift reg)	cccc 0000 010x xxxx xxxx xxxx 0xx1 xxxx */
+	/* RSB (reg-shift reg)	cccc 0000 011x xxxx xxxx xxxx 0xx1 xxxx */
+	/* ADD (reg-shift reg)	cccc 0000 100x xxxx xxxx xxxx 0xx1 xxxx */
+	/* ADC (reg-shift reg)	cccc 0000 101x xxxx xxxx xxxx 0xx1 xxxx */
+	/* SBC (reg-shift reg)	cccc 0000 110x xxxx xxxx xxxx 0xx1 xxxx */
+	/* RSC (reg-shift reg)	cccc 0000 111x xxxx xxxx xxxx 0xx1 xxxx */
+	/* ORR (reg-shift reg)	cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
+	/* BIC (reg-shift reg)	cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
+	DECODE_EMULATEX	(0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
+						 REGS(ANY, ANY, NOPC, 0, ANY)),
+
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_001x_table[] = {
+	/* Data-processing (immediate)					*/
+
+	/* MOVW			cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
+	/* MOVT			cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0x0fb00000, 0x03000000, emulate_rd12rm0_noflags_nopc,
+						 REGS(0, NOPC, 0, 0, 0)),
+
+	/* YIELD		cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
+	DECODE_OR	(0x0fff00ff, 0x03200001),
+	/* SEV			cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
+	DECODE_EMULATE	(0x0fff00ff, 0x03200004, kprobe_emulate_none),
+	/* NOP			cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
+	/* WFE			cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
+	/* WFI			cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
+	DECODE_SIMULATE	(0x0fff00fc, 0x03200000, kprobe_simulate_nop),
+	/* DBG			cccc 0011 0010 0000 xxxx xxxx ffff xxxx */
+	/* unallocated hints	cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
+	/* MSR (immediate)	cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0x0fb00000, 0x03200000),
+
+	/* <op>S PC, ...	cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */
+	DECODE_REJECT	(0x0e10f000, 0x0210f000),
+
+	/* TST (immediate)	cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx */
+	/* TEQ (immediate)	cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx */
+	/* CMP (immediate)	cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx */
+	/* CMN (immediate)	cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0x0f900000, 0x03100000, emulate_rd12rn16rm0rs8_rwflags,
+						 REGS(ANY, 0, 0, 0, 0)),
+
+	/* MOV (immediate)	cccc 0011 101x xxxx xxxx xxxx xxxx xxxx */
+	/* MVN (immediate)	cccc 0011 111x xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0x0fa00000, 0x03a00000, emulate_rd12rn16rm0rs8_rwflags,
+						 REGS(0, ANY, 0, 0, 0)),
+
+	/* AND (immediate)	cccc 0010 000x xxxx xxxx xxxx xxxx xxxx */
+	/* EOR (immediate)	cccc 0010 001x xxxx xxxx xxxx xxxx xxxx */
+	/* SUB (immediate)	cccc 0010 010x xxxx xxxx xxxx xxxx xxxx */
+	/* RSB (immediate)	cccc 0010 011x xxxx xxxx xxxx xxxx xxxx */
+	/* ADD (immediate)	cccc 0010 100x xxxx xxxx xxxx xxxx xxxx */
+	/* ADC (immediate)	cccc 0010 101x xxxx xxxx xxxx xxxx xxxx */
+	/* SBC (immediate)	cccc 0010 110x xxxx xxxx xxxx xxxx xxxx */
+	/* RSC (immediate)	cccc 0010 111x xxxx xxxx xxxx xxxx xxxx */
+	/* ORR (immediate)	cccc 0011 100x xxxx xxxx xxxx xxxx xxxx */
+	/* BIC (immediate)	cccc 0011 110x xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0x0e000000, 0x02000000, emulate_rd12rn16rm0rs8_rwflags,
+						 REGS(ANY, ANY, 0, 0, 0)),
+
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_0110_____xxx1_table[] = {
+	/* Media instructions						*/
+
+	/* SEL			cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx */
+	DECODE_EMULATEX	(0x0ff000f0, 0x068000b0, emulate_rd12rn16rm0_rwflags_nopc,
+						 REGS(NOPC, NOPC, 0, 0, NOPC)),
+
+	/* SSAT			cccc 0110 101x xxxx xxxx xxxx xx01 xxxx */
+	/* USAT			cccc 0110 111x xxxx xxxx xxxx xx01 xxxx */
+	DECODE_OR(0x0fa00030, 0x06a00010),
+	/* SSAT16		cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx */
+	/* USAT16		cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx */
+	DECODE_EMULATEX	(0x0fb000f0, 0x06a00030, emulate_rd12rn16rm0_rwflags_nopc,
+						 REGS(0, NOPC, 0, 0, NOPC)),
+
+	/* REV			cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
+	/* REV16		cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
+	/* RBIT			cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
+	/* REVSH		cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
+	DECODE_EMULATEX	(0x0fb00070, 0x06b00030, emulate_rd12rm0_noflags_nopc,
+						 REGS(0, NOPC, 0, 0, NOPC)),
+
+	/* ???			cccc 0110 0x00 xxxx xxxx xxxx xxx1 xxxx */
+	DECODE_REJECT	(0x0fb00010, 0x06000010),
+	/* ???			cccc 0110 0xxx xxxx xxxx xxxx 1011 xxxx */
+	DECODE_REJECT	(0x0f8000f0, 0x060000b0),
+	/* ???			cccc 0110 0xxx xxxx xxxx xxxx 1101 xxxx */
+	DECODE_REJECT	(0x0f8000f0, 0x060000d0),
+	/* SADD16		cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx */
+	/* SADDSUBX		cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx */
+	/* SSUBADDX		cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx */
+	/* SSUB16		cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx */
+	/* SADD8		cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx */
+	/* SSUB8		cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx */
+	/* QADD16		cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx */
+	/* QADDSUBX		cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx */
+	/* QSUBADDX		cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx */
+	/* QSUB16		cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx */
+	/* QADD8		cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx */
+	/* QSUB8		cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx */
+	/* SHADD16		cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx */
+	/* SHADDSUBX		cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx */
+	/* SHSUBADDX		cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx */
+	/* SHSUB16		cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx */
+	/* SHADD8		cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx */
+	/* SHSUB8		cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx */
+	/* UADD16		cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx */
+	/* UADDSUBX		cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx */
+	/* USUBADDX		cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx */
+	/* USUB16		cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx */
+	/* UADD8		cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx */
+	/* USUB8		cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx */
+	/* UQADD16		cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx */
+	/* UQADDSUBX		cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx */
+	/* UQSUBADDX		cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx */
+	/* UQSUB16		cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx */
+	/* UQADD8		cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx */
+	/* UQSUB8		cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx */
+	/* UHADD16		cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx */
+	/* UHADDSUBX		cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx */
+	/* UHSUBADDX		cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx */
+	/* UHSUB16		cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx */
+	/* UHADD8		cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx */
+	/* UHSUB8		cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx */
+	DECODE_EMULATEX	(0x0f800010, 0x06000010, emulate_rd12rn16rm0_rwflags_nopc,
+						 REGS(NOPC, NOPC, 0, 0, NOPC)),
+
+	/* PKHBT		cccc 0110 1000 xxxx xxxx xxxx x001 xxxx */
+	/* PKHTB		cccc 0110 1000 xxxx xxxx xxxx x101 xxxx */
+	DECODE_EMULATEX	(0x0ff00030, 0x06800010, emulate_rd12rn16rm0_rwflags_nopc,
+						 REGS(NOPC, NOPC, 0, 0, NOPC)),
+
+	/* ???			cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx */
+	/* ???			cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx */
+	DECODE_REJECT	(0x0fb000f0, 0x06900070),
+
+	/* SXTB16		cccc 0110 1000 1111 xxxx xxxx 0111 xxxx */
+	/* SXTB			cccc 0110 1010 1111 xxxx xxxx 0111 xxxx */
+	/* SXTH			cccc 0110 1011 1111 xxxx xxxx 0111 xxxx */
+	/* UXTB16		cccc 0110 1100 1111 xxxx xxxx 0111 xxxx */
+	/* UXTB			cccc 0110 1110 1111 xxxx xxxx 0111 xxxx */
+	/* UXTH			cccc 0110 1111 1111 xxxx xxxx 0111 xxxx */
+	DECODE_EMULATEX	(0x0f8f00f0, 0x068f0070, emulate_rd12rm0_noflags_nopc,
+						 REGS(0, NOPC, 0, 0, NOPC)),
+
+	/* SXTAB16		cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx */
+	/* SXTAB		cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx */
+	/* SXTAH		cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx */
+	/* UXTAB16		cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx */
+	/* UXTAB		cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx */
+	/* UXTAH		cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx */
+	DECODE_EMULATEX	(0x0f8000f0, 0x06800070, emulate_rd12rn16rm0_rwflags_nopc,
+						 REGS(NOPCX, NOPC, 0, 0, NOPC)),
+
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_0111_____xxx1_table[] = {
+	/* Media instructions						*/
+
+	/* UNDEFINED		cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */
+	DECODE_REJECT	(0x0ff000f0, 0x07f000f0),
+
+	/* SMLALD		cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
+	/* SMLSLD		cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
+	DECODE_EMULATEX	(0x0ff00090, 0x07400010, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
+						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+	/* SMUAD		cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx */
+	/* SMUSD		cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx */
+	DECODE_OR	(0x0ff0f090, 0x0700f010),
+	/* SMMUL		cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx */
+	DECODE_OR	(0x0ff0f0d0, 0x0750f010),
+	/* USAD8		cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */
+	DECODE_EMULATEX	(0x0ff0f0f0, 0x0780f010, emulate_rd16rn12rm0rs8_rwflags_nopc,
+						 REGS(NOPC, 0, NOPC, 0, NOPC)),
+
+	/* SMLAD		cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx */
+	/* SMLSD		cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx */
+	DECODE_OR	(0x0ff00090, 0x07000010),
+	/* SMMLA		cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx */
+	DECODE_OR	(0x0ff000d0, 0x07500010),
+	/* USADA8		cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */
+	DECODE_EMULATEX	(0x0ff000f0, 0x07800010, emulate_rd16rn12rm0rs8_rwflags_nopc,
+						 REGS(NOPC, NOPCX, NOPC, 0, NOPC)),
+
+	/* SMMLS		cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx */
+	DECODE_EMULATEX	(0x0ff000d0, 0x075000d0, emulate_rd16rn12rm0rs8_rwflags_nopc,
+						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+	/* SBFX			cccc 0111 101x xxxx xxxx xxxx x101 xxxx */
+	/* UBFX			cccc 0111 111x xxxx xxxx xxxx x101 xxxx */
+	DECODE_EMULATEX	(0x0fa00070, 0x07a00050, emulate_rd12rm0_noflags_nopc,
+						 REGS(0, NOPC, 0, 0, NOPC)),
+
+	/* BFC			cccc 0111 110x xxxx xxxx xxxx x001 1111 */
+	DECODE_EMULATEX	(0x0fe0007f, 0x07c0001f, emulate_rd12rm0_noflags_nopc,
+						 REGS(0, NOPC, 0, 0, 0)),
+
+	/* BFI			cccc 0111 110x xxxx xxxx xxxx x001 xxxx */
+	DECODE_EMULATEX	(0x0fe00070, 0x07c00010, emulate_rd12rm0_noflags_nopc,
+						 REGS(0, NOPC, 0, 0, NOPCX)),
+
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_01xx_table[] = {
+	/* Load/store word and unsigned byte				*/
+
+	/* LDRB/STRB pc,[...]	cccc 01xx x0xx xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0x0c40f000, 0x0440f000),
+
+	/* STRT			cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRT			cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */
+	/* STRBT		cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRBT		cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0x0d200000, 0x04200000),
+
+	/* STR (immediate)	cccc 010x x0x0 xxxx xxxx xxxx xxxx xxxx */
+	/* STRB (immediate)	cccc 010x x1x0 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0x0e100000, 0x04000000, emulate_str,
+						 REGS(NOPCWB, ANY, 0, 0, 0)),
+
+	/* LDR (immediate)	cccc 010x x0x1 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRB (immediate)	cccc 010x x1x1 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0x0e100000, 0x04100000, emulate_ldr,
+						 REGS(NOPCWB, ANY, 0, 0, 0)),
+
+	/* STR (register)	cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx */
+	/* STRB (register)	cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0x0e100000, 0x06000000, emulate_str,
+						 REGS(NOPCWB, ANY, 0, 0, NOPC)),
+
+	/* LDR (register)	cccc 011x x0x1 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRB (register)	cccc 011x x1x1 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0x0e100000, 0x06100000, emulate_ldr,
+						 REGS(NOPCWB, ANY, 0, 0, NOPC)),
+
+	DECODE_END
+};
+
+static const union decode_item arm_cccc_100x_table[] = {
+	/* Block data transfer instructions				*/
+
+	/* LDM			cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
+	/* STM			cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_CUSTOM	(0x0e400000, 0x08000000, kprobe_decode_ldmstm),
+
+	/* STM (user registers)	cccc 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
+	/* LDM (user registers)	cccc 100x x1x1 xxxx 0xxx xxxx xxxx xxxx */
+	/* LDM (exception ret)	cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */
+	DECODE_END
+};
+
+const union decode_item kprobe_decode_arm_table[] = {
+	/*
+	 * Unconditional instructions
+	 *			1111 xxxx xxxx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xf0000000, 0xf0000000, arm_1111_table),
+
+	/*
+	 * Miscellaneous instructions
+	 *			cccc 0001 0xx0 xxxx xxxx xxxx 0xxx xxxx
+	 */
+	DECODE_TABLE	(0x0f900080, 0x01000000, arm_cccc_0001_0xx0____0xxx_table),
+
+	/*
+	 * Halfword multiply and multiply-accumulate
+	 *			cccc 0001 0xx0 xxxx xxxx xxxx 1xx0 xxxx
+	 */
+	DECODE_TABLE	(0x0f900090, 0x01000080, arm_cccc_0001_0xx0____1xx0_table),
+
+	/*
+	 * Multiply and multiply-accumulate
+	 *			cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx
+	 */
+	DECODE_TABLE	(0x0f0000f0, 0x00000090, arm_cccc_0000_____1001_table),
+
+	/*
+	 * Synchronization primitives
+	 *			cccc 0001 xxxx xxxx xxxx xxxx 1001 xxxx
+	 */
+	DECODE_TABLE	(0x0f0000f0, 0x01000090, arm_cccc_0001_____1001_table),
+
+	/*
+	 * Extra load/store instructions
+	 *			cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx
+	 */
+	DECODE_TABLE	(0x0e000090, 0x00000090, arm_cccc_000x_____1xx1_table),
+
+	/*
+	 * Data-processing (register)
+	 *			cccc 000x xxxx xxxx xxxx xxxx xxx0 xxxx
+	 * Data-processing (register-shifted register)
+	 *			cccc 000x xxxx xxxx xxxx xxxx 0xx1 xxxx
+	 */
+	DECODE_TABLE	(0x0e000000, 0x00000000, arm_cccc_000x_table),
+
+	/*
+	 * Data-processing (immediate)
+	 *			cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0x0e000000, 0x02000000, arm_cccc_001x_table),
+
+	/*
+	 * Media instructions
+	 *			cccc 011x xxxx xxxx xxxx xxxx xxx1 xxxx
+	 */
+	DECODE_TABLE	(0x0f000010, 0x06000010, arm_cccc_0110_____xxx1_table),
+	DECODE_TABLE	(0x0f000010, 0x07000010, arm_cccc_0111_____xxx1_table),
+
+	/*
+	 * Load/store word and unsigned byte
+	 *			cccc 01xx xxxx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0x0c000000, 0x04000000, arm_cccc_01xx_table),
+
+	/*
+	 * Block data transfer instructions
+	 *			cccc 100x xxxx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0x0e000000, 0x08000000, arm_cccc_100x_table),
+
+	/* B			cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
+	/* BL			cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
+	DECODE_SIMULATE	(0x0e000000, 0x0a000000, simulate_bbl),
+
+	/*
+	 * Supervisor Call, and coprocessor instructions
+	 */
+
+	/* MCRR			cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx */
+	/* MRRC			cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx */
+	/* LDC			cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
+	/* STC			cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
+	/* CDP			cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
+	/* MCR			cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
+	/* MRC			cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
+	/* SVC			cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0x0c000000, 0x0c000000),
+
+	DECODE_END
+};
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_arm_table);
+#endif
+
+static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	regs->ARM_pc += 4;
+	p->ainsn.insn_handler(p, regs);
+}
+
+/* Return:
+ *   INSN_REJECTED     If instruction is one not allowed to kprobe,
+ *   INSN_GOOD         If instruction is supported and uses instruction slot,
+ *   INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
+ *
+ * For instructions we don't want to kprobe (INSN_REJECTED return result):
+ *   These are generally ones that modify the processor state, making
+ *   them "hard" to simulate, such as instructions that switch
+ *   processor modes or make accesses in alternate modes.  Any of
+ *   these could be simulated if the work was put into it, but the
+ *   return would be low considering they should also be very rare.
+ */
+enum kprobe_insn __kprobes
+arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	asi->insn_singlestep = arm_singlestep;
+	asi->insn_check_cc = kprobe_condition_checks[insn>>28];
+	return kprobe_decode_insn(insn, asi, kprobe_decode_arm_table, false);
+}
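+
+/*
+ * Worked example of the dispatch above: for a probed instruction
+ * 0x1a000000 (bne), insn>>28 = 1, so insn_check_cc is set to
+ * __check_ne from kprobe_condition_checks[]. At probe-hit time the
+ * saved CPSR can then be tested and simulation skipped when the
+ * condition fails. Index 15 (the 1111 "unconditional" space) maps
+ * to __check_al, as those instructions execute regardless of flags.
+ */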
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-common.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-common.c
new file mode 100644
index 0000000..18a7628
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-common.c
@@ -0,0 +1,578 @@
+/*
+ * arch/arm/kernel/kprobes-common.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <asm/system_info.h>
+
+#include "kprobes.h"
+
+
+#ifndef find_str_pc_offset
+
+/*
+ * For STR and STM instructions, an ARM core may choose to use either
+ * a +8 or a +12 displacement from the current instruction's address.
+ * Whichever value is chosen for a given core, it must be the same for
+ * both instructions and may not change.  This function measures it.
+ */
+
+int str_pc_offset;
+
+void __init find_str_pc_offset(void)
+{
+	int addr, scratch, ret;
+
+	__asm__ (
+		"sub	%[ret], pc, #4		\n\t"
+		"str	pc, %[addr]		\n\t"
+		"ldr	%[scr], %[addr]		\n\t"
+		"sub	%[ret], %[scr], %[ret]	\n\t"
+		: [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));
+
+	str_pc_offset = ret;
+}
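+
+/*
+ * How the measurement above works: "sub %[ret], pc, #4" executes with
+ * PC reading as its own address + 8, so ret holds the address of the
+ * following str instruction. That str stores PC as either
+ * str_address + 8 or str_address + 12, depending on the core, so
+ * subtracting ret from the stored value yields 8 or 12 directly.
+ */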
+
+#endif /* !find_str_pc_offset */
+
+
+#ifndef test_load_write_pc_interworking
+
+bool load_write_pc_interworks;
+
+void __init test_load_write_pc_interworking(void)
+{
+	int arch = cpu_architecture();
+	BUG_ON(arch == CPU_ARCH_UNKNOWN);
+	load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T;
+}
+
+#endif /* !test_load_write_pc_interworking */
+
+
+#ifndef test_alu_write_pc_interworking
+
+bool alu_write_pc_interworks;
+
+void __init test_alu_write_pc_interworking(void)
+{
+	int arch = cpu_architecture();
+	BUG_ON(arch == CPU_ARCH_UNKNOWN);
+	alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7;
+}
+
+#endif /* !test_alu_write_pc_interworking */
+
+
+void __init arm_kprobe_decode_init(void)
+{
+	find_str_pc_offset();
+	test_load_write_pc_interworking();
+	test_alu_write_pc_interworking();
+}
+
+
+static unsigned long __kprobes __check_eq(unsigned long cpsr)
+{
+	return cpsr & PSR_Z_BIT;
+}
+
+static unsigned long __kprobes __check_ne(unsigned long cpsr)
+{
+	return (~cpsr) & PSR_Z_BIT;
+}
+
+static unsigned long __kprobes __check_cs(unsigned long cpsr)
+{
+	return cpsr & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_cc(unsigned long cpsr)
+{
+	return (~cpsr) & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_mi(unsigned long cpsr)
+{
+	return cpsr & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_pl(unsigned long cpsr)
+{
+	return (~cpsr) & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_vs(unsigned long cpsr)
+{
+	return cpsr & PSR_V_BIT;
+}
+
+static unsigned long __kprobes __check_vc(unsigned long cpsr)
+{
+	return (~cpsr) & PSR_V_BIT;
+}
+
+static unsigned long __kprobes __check_hi(unsigned long cpsr)
+{
+	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+	return cpsr & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_ls(unsigned long cpsr)
+{
+	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+	return (~cpsr) & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_ge(unsigned long cpsr)
+{
+	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+	return (~cpsr) & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_lt(unsigned long cpsr)
+{
+	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+	return cpsr & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_gt(unsigned long cpsr)
+{
+	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+	temp |= (cpsr << 1);			 /* PSR_N_BIT |= PSR_Z_BIT */
+	return (~temp) & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_le(unsigned long cpsr)
+{
+	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+	temp |= (cpsr << 1);			 /* PSR_N_BIT |= PSR_Z_BIT */
+	return temp & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_al(unsigned long cpsr)
+{
+	return true;
+}
+
+kprobe_check_cc * const kprobe_condition_checks[16] = {
+	&__check_eq, &__check_ne, &__check_cs, &__check_cc,
+	&__check_mi, &__check_pl, &__check_vs, &__check_vc,
+	&__check_hi, &__check_ls, &__check_ge, &__check_lt,
+	&__check_gt, &__check_le, &__check_al, &__check_al
+};
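+
+/*
+ * The shifted-mask tricks above rely on the PSR flag layout
+ * (N=bit31, Z=bit30, C=bit29, V=bit28). For example, in __check_hi
+ * "cpsr >> 1" moves Z into the C bit position, so clearing those bits
+ * leaves C set only when C=1 and Z=0, which is the HI condition.
+ * Likewise "cpsr << 3" in __check_ge aligns V under N, making the XOR
+ * compute N^V in bit 31; GE holds when that bit is clear.
+ */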
+
+
+void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs)
+{
+}
+
+void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs)
+{
+	p->ainsn.insn_fn();
+}
+
+static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rn = (insn >> 16) & 0xf;
+	int lbit = insn & (1 << 20);
+	int wbit = insn & (1 << 21);
+	int ubit = insn & (1 << 23);
+	int pbit = insn & (1 << 24);
+	long *addr = (long *)regs->uregs[rn];
+	int reg_bit_vector;
+	int reg_count;
+
+	reg_count = 0;
+	reg_bit_vector = insn & 0xffff;
+	while (reg_bit_vector) {
+		reg_bit_vector &= (reg_bit_vector - 1);
+		++reg_count;
+	}
+
+	if (!ubit)
+		addr -= reg_count;
+	addr += (!pbit == !ubit);
+
+	reg_bit_vector = insn & 0xffff;
+	while (reg_bit_vector) {
+		int reg = __ffs(reg_bit_vector);
+		reg_bit_vector &= (reg_bit_vector - 1);
+		if (lbit)
+			regs->uregs[reg] = *addr++;
+		else
+			*addr++ = regs->uregs[reg];
+	}
+
+	if (wbit) {
+		if (!ubit)
+			addr -= reg_count;
+		addr -= (!pbit == !ubit);
+		regs->uregs[rn] = (long)addr;
+	}
+}
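+
+/*
+ * Example of the address arithmetic above: for "ldmdb r4!, {r0,r2,r5}"
+ * we have pbit=1, ubit=0 and reg_count=3, so addr starts at r4 - 12
+ * and the three registers load from ascending addresses. After the
+ * transfer loop addr equals the original r4; the writeback step then
+ * subtracts reg_count again, leaving r4 = r4 - 12 as the architecture
+ * requires for decrement-before with writeback.
+ */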
+
+static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
+{
+	regs->ARM_pc = (long)p->addr + str_pc_offset;
+	simulate_ldm1stm1(p, regs);
+	regs->ARM_pc = (long)p->addr + 4;
+}
+
+static void __kprobes simulate_ldm1_pc(struct kprobe *p, struct pt_regs *regs)
+{
+	simulate_ldm1stm1(p, regs);
+	load_write_pc(regs->ARM_pc, regs);
+}
+
+static void __kprobes
+emulate_generic_r0_12_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+	register void *rregs asm("r1") = regs;
+	register void *rfn asm("lr") = p->ainsn.insn_fn;
+
+	__asm__ __volatile__ (
+		"stmdb	sp!, {%[regs], r11}	\n\t"
+		"ldmia	%[regs], {r0-r12}	\n\t"
+#if __LINUX_ARM_ARCH__ >= 6
+		"blx	%[fn]			\n\t"
+#else
+		"str	%[fn], [sp, #-4]!	\n\t"
+		"adr	lr, 1f			\n\t"
+		"ldr	pc, [sp], #4		\n\t"
+		"1:				\n\t"
+#endif
+		"ldr	lr, [sp], #4		\n\t" /* lr = regs */
+		"stmia	lr, {r0-r12}		\n\t"
+		"ldr	r11, [sp], #4		\n\t"
+		: [regs] "=r" (rregs), [fn] "=r" (rfn)
+		: "0" (rregs), "1" (rfn)
+		: "r0", "r2", "r3", "r4", "r5", "r6", "r7",
+		  "r8", "r9", "r10", "r12", "memory", "cc"
+		);
+}
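+
+/*
+ * The asm above forms a trampoline: it spills the pt_regs pointer and
+ * r11, loads r0-r12 from the saved register frame, calls the prepared
+ * instruction slot (which ends in a return back here), then writes
+ * r0-r12 back into the frame. The emulated instruction therefore sees
+ * and updates only the task's r0-r12 values.
+ */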
+
+static void __kprobes
+emulate_generic_r2_14_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+	emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+2));
+}
+
+static void __kprobes
+emulate_ldm_r3_15(struct kprobe *p, struct pt_regs *regs)
+{
+	emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+3));
+	load_write_pc(regs->ARM_pc, regs);
+}
+
+enum kprobe_insn __kprobes
+kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	kprobe_insn_handler_t *handler = NULL;
+	unsigned reglist = insn & 0xffff;
+	int is_ldm = insn & 0x100000;
+	int rn = (insn >> 16) & 0xf;
+
+	if (rn <= 12 && (reglist & 0xe000) == 0) {
+		/* Instruction only uses registers in the range R0..R12 */
+		handler = emulate_generic_r0_12_noflags;
+
+	} else if (rn >= 2 && (reglist & 0x8003) == 0) {
+		/* Instruction only uses registers in the range R2..R14 */
+		rn -= 2;
+		reglist >>= 2;
+		handler = emulate_generic_r2_14_noflags;
+
+	} else if (rn >= 3 && (reglist & 0x0007) == 0) {
+		/* Instruction only uses registers in the range R3..R15 */
+		if (is_ldm && (reglist & 0x8000)) {
+			rn -= 3;
+			reglist >>= 3;
+			handler = emulate_ldm_r3_15;
+		}
+	}
+
+	if (handler) {
+		/* We can emulate the instruction in (possibly) modified form */
+		asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
+		asi->insn_handler = handler;
+		return INSN_GOOD;
+	}
+
+	/* Fallback to slower simulation... */
+	if (reglist & 0x8000)
+		handler = is_ldm ? simulate_ldm1_pc : simulate_stm1_pc;
+	else
+		handler = simulate_ldm1stm1;
+	asi->insn_handler = handler;
+	return INSN_GOOD_NO_SLOT;
+}
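+
+/*
+ * Renumbering example: "ldmia r5, {r4, r14}" has rn=5 and
+ * reglist=0x4010. r14 rules out the r0-r12 fast path, but the r2-r14
+ * case applies, rewriting the instruction to "ldmia r3, {r2, r12}".
+ * emulate_generic_r2_14_noflags() then runs that copy against
+ * uregs+2, so slots r2/r3/r12 of the emulated instruction line up
+ * with the real r4/r5/r14.
+ */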
+
+
+/*
+ * Prepare an instruction slot to receive an instruction for emulating.
+ * This is done by placing a subroutine return after the location where the
+ * instruction will be placed. We also modify ARM instructions to be
+ * unconditional as the condition code will already be checked before any
+ * emulation handler is called.
+ */
+static kprobe_opcode_t __kprobes
+prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+								bool thumb)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+	if (thumb) {
+		u16 *thumb_insn = (u16 *)asi->insn;
+		thumb_insn[1] = 0x4770; /* Thumb bx lr */
+		thumb_insn[2] = 0x4770; /* Thumb bx lr */
+		return insn;
+	}
+	asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
+#else
+	asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
+#endif
+	/* Make an ARM instruction unconditional */
+	if (insn < 0xe0000000)
+		insn = (insn | 0xe0000000) & ~0x10000000;
+	return insn;
+}
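+
+/*
+ * Condition-stripping example: "addne r0, r1, r2" (0x10810002) is
+ * below 0xe0000000, so it becomes (0x10810002 | 0xe0000000) &
+ * ~0x10000000 = 0xe0810002, i.e. an unconditional add. Instructions
+ * already coded with cond=1110 or 1111 compare >= 0xe0000000 and are
+ * left untouched.
+ */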
+
+/*
+ * Write a (probably modified) instruction into the slot previously prepared by
+ * prepare_emulated_insn
+ */
+static void  __kprobes
+set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+								bool thumb)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+	if (thumb) {
+		u16 *ip = (u16 *)asi->insn;
+		if (is_wide_instruction(insn))
+			*ip++ = insn >> 16;
+		*ip++ = insn;
+		return;
+	}
+#endif
+	asi->insn[0] = insn;
+}
+
+/*
+ * When we modify the register numbers encoded in an instruction to be emulated,
+ * the new values come from this define. For ARM and 32-bit Thumb instructions
+ * this gives...
+ *
+ *	bit position	  16  12   8   4   0
+ *	---------------+---+---+---+---+---+
+ *	register	 r2  r0  r1  --  r3
+ */
+#define INSN_NEW_BITS		0x00020103
+
+/* Each nibble has same value as that at INSN_NEW_BITS bit 16 */
+#define INSN_SAMEAS16_BITS	0x22222222
+
+/*
+ * Validate and modify each of the registers encoded in an instruction.
+ *
+ * Each nibble in regs contains a value from enum decode_reg_type. For each
+ * non-zero value, the corresponding nibble in pinsn is validated and modified
+ * according to the type.
+ */
+static bool __kprobes decode_regs(kprobe_opcode_t* pinsn, u32 regs)
+{
+	kprobe_opcode_t insn = *pinsn;
+	kprobe_opcode_t mask = 0xf; /* Start at least significant nibble */
+
+	for (; regs != 0; regs >>= 4, mask <<= 4) {
+
+		kprobe_opcode_t new_bits = INSN_NEW_BITS;
+
+		switch (regs & 0xf) {
+
+		case REG_TYPE_NONE:
+			/* Nibble not a register, skip to next */
+			continue;
+
+		case REG_TYPE_ANY:
+			/* Any register is allowed */
+			break;
+
+		case REG_TYPE_SAMEAS16:
+			/* Replace register with same as at bit position 16 */
+			new_bits = INSN_SAMEAS16_BITS;
+			break;
+
+		case REG_TYPE_SP:
+			/* Only allow SP (R13) */
+			if ((insn ^ 0xdddddddd) & mask)
+				goto reject;
+			break;
+
+		case REG_TYPE_PC:
+			/* Only allow PC (R15) */
+			if ((insn ^ 0xffffffff) & mask)
+				goto reject;
+			break;
+
+		case REG_TYPE_NOSP:
+			/* Reject SP (R13) */
+			if (((insn ^ 0xdddddddd) & mask) == 0)
+				goto reject;
+			break;
+
+		case REG_TYPE_NOSPPC:
+		case REG_TYPE_NOSPPCX:
+			/* Reject SP and PC (R13 and R15) */
+			if (((insn ^ 0xdddddddd) & 0xdddddddd & mask) == 0)
+				goto reject;
+			break;
+
+		case REG_TYPE_NOPCWB:
+			if (!is_writeback(insn))
+				break; /* No writeback, so any register is OK */
+			/* fall through... */
+		case REG_TYPE_NOPC:
+		case REG_TYPE_NOPCX:
+			/* Reject PC (R15) */
+			if (((insn ^ 0xffffffff) & mask) == 0)
+				goto reject;
+			break;
+		}
+
+		/* Replace value of nibble with new register number... */
+		insn &= ~mask;
+		insn |= new_bits & mask;
+	}
+
+	*pinsn = insn;
+	return true;
+
+reject:
+	return false;
+}
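+
+/*
+ * Example pass through the loop above: checking Rd at bits[15:12]
+ * uses mask 0x0000f000. For REG_TYPE_PC, (insn ^ 0xffffffff) has a
+ * zero nibble there only when the field is 0xf, and REG_TYPE_NOPC
+ * rejects exactly that case. An accepted nibble is then rewritten
+ * from INSN_NEW_BITS, so e.g. "add r10, r11, r12" (0xe08ba00c) with
+ * REGS(ANY, ANY, 0, 0, ANY) becomes "add r0, r2, r3" (0xe0820003)
+ * before being placed in the instruction slot.
+ */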
+
+static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
+	[DECODE_TYPE_TABLE]	= sizeof(struct decode_table),
+	[DECODE_TYPE_CUSTOM]	= sizeof(struct decode_custom),
+	[DECODE_TYPE_SIMULATE]	= sizeof(struct decode_simulate),
+	[DECODE_TYPE_EMULATE]	= sizeof(struct decode_emulate),
+	[DECODE_TYPE_OR]	= sizeof(struct decode_or),
+	[DECODE_TYPE_REJECT]	= sizeof(struct decode_reject)
+};
+
+/*
+ * kprobe_decode_insn operates on data tables in order to decode an ARM
+ * architecture instruction onto which a kprobe has been placed.
+ *
+ * These instruction decoding tables are a concatenation of entries each
+ * of which consist of one of the following structs:
+ *
+ *	decode_table
+ *	decode_custom
+ *	decode_simulate
+ *	decode_emulate
+ *	decode_or
+ *	decode_reject
+ *
+ * Each of these starts with a struct decode_header which has the following
+ * fields:
+ *
+ *	type_regs
+ *	mask
+ *	value
+ *
+ * The least significant DECODE_TYPE_BITS of type_regs contains a value
+ * from enum decode_type, this indicates which of the decode_* structs
+ * the entry contains. The value DECODE_TYPE_END indicates the end of the
+ * table.
+ *
+ * When the table is parsed, each entry is checked in turn to see if it
+ * matches the instruction to be decoded using the test:
+ *
+ *	(insn & mask) == value
+ *
+ * If no match is found before the end of the table is reached then decoding
+ * fails with INSN_REJECTED.
+ *
+ * When a match is found, decode_regs() is called to validate and modify each
+ * of the registers encoded in the instruction; the data it uses to do this
+ * is (type_regs >> DECODE_TYPE_BITS). A validation failure will cause decoding
+ * to fail with INSN_REJECTED.
+ *
+ * Once the instruction has passed the above tests, further processing
+ * depends on the type of the table entry's decode struct.
+ *
+ */
+int __kprobes
+kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+				const union decode_item *table, bool thumb)
+{
+	const struct decode_header *h = (struct decode_header *)table;
+	const struct decode_header *next;
+	bool matched = false;
+
+	insn = prepare_emulated_insn(insn, asi, thumb);
+
+	for (;; h = next) {
+		enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+		u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;
+
+		if (type == DECODE_TYPE_END)
+			return INSN_REJECTED;
+
+		next = (struct decode_header *)
+				((uintptr_t)h + decode_struct_sizes[type]);
+
+		if (!matched && (insn & h->mask.bits) != h->value.bits)
+			continue;
+
+		if (!decode_regs(&insn, regs))
+			return INSN_REJECTED;
+
+		switch (type) {
+
+		case DECODE_TYPE_TABLE: {
+			struct decode_table *d = (struct decode_table *)h;
+			next = (struct decode_header *)d->table.table;
+			break;
+		}
+
+		case DECODE_TYPE_CUSTOM: {
+			struct decode_custom *d = (struct decode_custom *)h;
+			return (*d->decoder.decoder)(insn, asi);
+		}
+
+		case DECODE_TYPE_SIMULATE: {
+			struct decode_simulate *d = (struct decode_simulate *)h;
+			asi->insn_handler = d->handler.handler;
+			return INSN_GOOD_NO_SLOT;
+		}
+
+		case DECODE_TYPE_EMULATE: {
+			struct decode_emulate *d = (struct decode_emulate *)h;
+			asi->insn_handler = d->handler.handler;
+			set_emulated_insn(insn, asi, thumb);
+			return INSN_GOOD;
+		}
+
+		case DECODE_TYPE_OR:
+			matched = true;
+			break;
+
+		case DECODE_TYPE_REJECT:
+		default:
+			return INSN_REJECTED;
+		}
+	}
+}
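+
+/*
+ * Walk-through: probing "mul r5, r1, r2" (0xe0050291) against
+ * kprobe_decode_arm_table fails the 1111 table and the
+ * miscellaneous and halfword-multiply entries, then matches the
+ * multiply entry ((insn & 0x0f0000f0) == 0x00000090), so parsing
+ * continues into arm_cccc_0000_____1001_table, where the matching
+ * DECODE_EMULATEX entry validates the registers and installs the
+ * emulation handler.
+ */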
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test-arm.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test-arm.c
new file mode 100644
index 0000000..ba32b39
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test-arm.c
@@ -0,0 +1,1330 @@
+/*
+ * arch/arm/kernel/kprobes-test-arm.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "kprobes-test.h"
+
+
+#define TEST_ISA "32"
+
+#define TEST_ARM_TO_THUMB_INTERWORK_R(code1, reg, val, code2)	\
+	TESTCASE_START(code1 #reg code2)			\
+	TEST_ARG_REG(reg, val)					\
+	TEST_ARG_REG(14, 99f)					\
+	TEST_ARG_END("")					\
+	"50:	nop			\n\t"			\
+	"1:	"code1 #reg code2"	\n\t"			\
+	"	bx	lr		\n\t"			\
+	".thumb				\n\t"			\
+	"3:	adr	lr, 2f		\n\t"			\
+	"	bx	lr		\n\t"			\
+	".arm				\n\t"			\
+	"2:	nop			\n\t"			\
+	TESTCASE_END
+
+#define TEST_ARM_TO_THUMB_INTERWORK_P(code1, reg, val, code2)	\
+	TESTCASE_START(code1 #reg code2)			\
+	TEST_ARG_PTR(reg, val)					\
+	TEST_ARG_REG(14, 99f)					\
+	TEST_ARG_MEM(15, 3f+1)					\
+	TEST_ARG_END("")					\
+	"50:	nop			\n\t"			\
+	"1:	"code1 #reg code2"	\n\t"			\
+	"	bx	lr		\n\t"			\
+	".thumb				\n\t"			\
+	"3:	adr	lr, 2f		\n\t"			\
+	"	bx	lr		\n\t"			\
+	".arm				\n\t"			\
+	"2:	nop			\n\t"			\
+	TESTCASE_END
+
+
+void kprobe_arm_test_cases(void)
+{
+	kprobe_test_flags = 0;
+
+	TEST_GROUP("Data-processing (register), (register-shifted register), (immediate)")
+
+#define _DATA_PROCESSING_DNM(op,s,val)						\
+	TEST_RR(  op "eq" s "	r0,  r",1, VAL1,", r",2, val, "")		\
+	TEST_RR(  op "ne" s "	r1,  r",1, VAL1,", r",2, val, ", lsl #3")	\
+	TEST_RR(  op "cs" s "	r2,  r",3, VAL1,", r",2, val, ", lsr #4")	\
+	TEST_RR(  op "cc" s "	r3,  r",3, VAL1,", r",2, val, ", asr #5")	\
+	TEST_RR(  op "mi" s "	r4,  r",5, VAL1,", r",2, N(val),", asr #6")	\
+	TEST_RR(  op "pl" s "	r5,  r",5, VAL1,", r",2, val, ", ror #7")	\
+	TEST_RR(  op "vs" s "	r6,  r",7, VAL1,", r",2, val, ", rrx")		\
+	TEST_R(   op "vc" s "	r6,  r",7, VAL1,", pc, lsl #3")			\
+	TEST_R(   op "vc" s "	r6,  r",7, VAL1,", sp, lsr #4")			\
+	TEST_R(   op "vc" s "	r6,  pc, r",7, VAL1,", asr #5")			\
+	TEST_R(   op "vc" s "	r6,  sp, r",7, VAL1,", ror #6")			\
+	TEST_RRR( op "hi" s "	r8,  r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\
+	TEST_RRR( op "ls" s "	r9,  r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\
+	TEST_RRR( op "ge" s "	r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\
+	TEST_RRR( op "lt" s "	r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
+	TEST_RR(  op "gt" s "	r12, r13"       ", r",14,val, ", ror r",14,7,"")\
+	TEST_RR(  op "le" s "	r14, r",0, val, ", r13"       ", lsl r",14,8,"")\
+	TEST_RR(  op s "	r12, pc"        ", r",14,val, ", ror r",14,7,"")\
+	TEST_RR(  op s "	r14, r",0, val, ", pc"        ", lsl r",14,8,"")\
+	TEST_R(   op "eq" s "	r0,  r",11,VAL1,", #0xf5")			\
+	TEST_R(   op "ne" s "	r11, r",0, VAL1,", #0xf5000000")		\
+	TEST_R(   op s "	r7,  r",8, VAL2,", #0x000af000")		\
+	TEST(     op s "	r4,  pc"        ", #0x00005a00")
+
+#define DATA_PROCESSING_DNM(op,val)		\
+	_DATA_PROCESSING_DNM(op,"",val)		\
+	_DATA_PROCESSING_DNM(op,"s",val)
+
+#define DATA_PROCESSING_NM(op,val)						\
+	TEST_RR(  op "ne	r",1, VAL1,", r",2, val, "")			\
+	TEST_RR(  op "eq	r",1, VAL1,", r",2, val, ", lsl #3")		\
+	TEST_RR(  op "cc	r",3, VAL1,", r",2, val, ", lsr #4")		\
+	TEST_RR(  op "cs	r",3, VAL1,", r",2, val, ", asr #5")		\
+	TEST_RR(  op "pl	r",5, VAL1,", r",2, N(val),", asr #6")		\
+	TEST_RR(  op "mi	r",5, VAL1,", r",2, val, ", ror #7")		\
+	TEST_RR(  op "vc	r",7, VAL1,", r",2, val, ", rrx")		\
+	TEST_R (  op "vs	r",7, VAL1,", pc, lsl #3")			\
+	TEST_R (  op "vs	r",7, VAL1,", sp, lsr #4")			\
+	TEST_R(   op "vs	pc, r",7, VAL1,", asr #5")			\
+	TEST_R(   op "vs	sp, r",7, VAL1,", ror #6")			\
+	TEST_RRR( op "ls	r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")	\
+	TEST_RRR( op "hi	r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")	\
+	TEST_RRR( op "lt	r",11,VAL1,", r",14,val, ", asr r",7, 5,"")	\
+	TEST_RRR( op "ge	r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")	\
+	TEST_RR(  op "le	r13"       ", r",14,val, ", ror r",14,7,"")	\
+	TEST_RR(  op "gt	r",0, val, ", r13"       ", lsl r",14,8,"")	\
+	TEST_RR(  op "	pc"        ", r",14,val, ", ror r",14,7,"")		\
+	TEST_RR(  op "	r",0, val, ", pc"        ", lsl r",14,8,"")		\
+	TEST_R(   op "eq	r",11,VAL1,", #0xf5")				\
+	TEST_R(   op "ne	r",0, VAL1,", #0xf5000000")			\
+	TEST_R(   op "	r",8, VAL2,", #0x000af000")
+
+#define _DATA_PROCESSING_DM(op,s,val)					\
+	TEST_R(   op "eq" s "	r0,  r",1, val, "")			\
+	TEST_R(   op "ne" s "	r1,  r",1, val, ", lsl #3")		\
+	TEST_R(   op "cs" s "	r2,  r",3, val, ", lsr #4")		\
+	TEST_R(   op "cc" s "	r3,  r",3, val, ", asr #5")		\
+	TEST_R(   op "mi" s "	r4,  r",5, N(val),", asr #6")		\
+	TEST_R(   op "pl" s "	r5,  r",5, val, ", ror #7")		\
+	TEST_R(   op "vs" s "	r6,  r",10,val, ", rrx")		\
+	TEST(     op "vs" s "	r7,  pc, lsl #3")			\
+	TEST(     op "vs" s "	r7,  sp, lsr #4")			\
+	TEST_RR(  op "vc" s "	r8,  r",7, val, ", lsl r",0, 3,"")	\
+	TEST_RR(  op "hi" s "	r9,  r",9, val, ", lsr r",7, 4,"")	\
+	TEST_RR(  op "ls" s "	r10, r",9, val, ", asr r",7, 5,"")	\
+	TEST_RR(  op "ge" s "	r11, r",11,N(val),", asr r",7, 6,"")	\
+	TEST_RR(  op "lt" s "	r12, r",11,val, ", ror r",14,7,"")	\
+	TEST_R(   op "gt" s "	r14, r13"       ", lsl r",14,8,"")	\
+	TEST_R(   op "le" s "	r14, pc"        ", lsl r",14,8,"")	\
+	TEST(     op "eq" s "	r0,  #0xf5")				\
+	TEST(     op "ne" s "	r11, #0xf5000000")			\
+	TEST(     op s "	r7,  #0x000af000")			\
+	TEST(     op s "	r4,  #0x00005a00")
+
+#define DATA_PROCESSING_DM(op,val)		\
+	_DATA_PROCESSING_DM(op,"",val)		\
+	_DATA_PROCESSING_DM(op,"s",val)
+
+	DATA_PROCESSING_DNM("and",0xf00f00ff)
+	DATA_PROCESSING_DNM("eor",0xf00f00ff)
+	DATA_PROCESSING_DNM("sub",VAL2)
+	DATA_PROCESSING_DNM("rsb",VAL2)
+	DATA_PROCESSING_DNM("add",VAL2)
+	DATA_PROCESSING_DNM("adc",VAL2)
+	DATA_PROCESSING_DNM("sbc",VAL2)
+	DATA_PROCESSING_DNM("rsc",VAL2)
+	DATA_PROCESSING_NM("tst",0xf00f00ff)
+	DATA_PROCESSING_NM("teq",0xf00f00ff)
+	DATA_PROCESSING_NM("cmp",VAL2)
+	DATA_PROCESSING_NM("cmn",VAL2)
+	DATA_PROCESSING_DNM("orr",0xf00f00ff)
+	DATA_PROCESSING_DM("mov",VAL2)
+	DATA_PROCESSING_DNM("bic",0xf00f00ff)
+	DATA_PROCESSING_DM("mvn",VAL2)
+
+	TEST("mov	ip, sp") /* This has special case emulation code */
+
+	TEST_SUPPORTED("mov	pc, #0x1000");
+	TEST_SUPPORTED("mov	sp, #0x1000");
+	TEST_SUPPORTED("cmp	pc, #0x1000");
+	TEST_SUPPORTED("cmp	sp, #0x1000");
+
+	/* Data-processing with PC as shift */
+	TEST_UNSUPPORTED(".word 0xe15c0f1e	@ cmp	r12, r14, asl pc")
+	TEST_UNSUPPORTED(".word 0xe1a0cf1e	@ mov	r12, r14, asl pc")
+	TEST_UNSUPPORTED(".word 0xe08caf1e	@ add	r10, r12, r14, asl pc")
+
+	/* Data-processing with PC as target and status registers updated */
+	TEST_UNSUPPORTED("movs	pc, r1")
+	TEST_UNSUPPORTED("movs	pc, r1, lsl r2")
+	TEST_UNSUPPORTED("movs	pc, #0x10000")
+	TEST_UNSUPPORTED("adds	pc, lr, r1")
+	TEST_UNSUPPORTED("adds	pc, lr, r1, lsl r2")
+	TEST_UNSUPPORTED("adds	pc, lr, #4")
+
+	/* Data-processing with SP as target */
+	TEST("add	sp, sp, #16")
+	TEST("sub	sp, sp, #8")
+	TEST("bic	sp, sp, #0x20")
+	TEST("orr	sp, sp, #0x20")
+	TEST_PR( "add	sp, r",10,0,", r",11,4,"")
+	TEST_PRR("add	sp, r",10,0,", r",11,4,", asl r",12,1,"")
+	TEST_P(  "mov	sp, r",10,0,"")
+	TEST_PR( "mov	sp, r",10,0,", asl r",12,0,"")
+
+	/* Data-processing with PC as target */
+	TEST_BF(   "add	pc, pc, #2f-1b-8")
+	TEST_BF_R ("add	pc, pc, r",14,2f-1f-8,"")
+	TEST_BF_R ("add	pc, r",14,2f-1f-8,", pc")
+	TEST_BF_R ("mov	pc, r",0,2f,"")
+	TEST_BF_RR("mov	pc, r",0,2f,", asl r",1,0,"")
+	TEST_BB(   "sub	pc, pc, #1b-2b+8")
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_BB(   "sub	pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#endif
+	TEST_BB_R( "sub	pc, pc, r",14, 1f-2f+8,"")
+	TEST_BB_R( "rsb	pc, r",14,1f-2f+8,", pc")
+	TEST_RR(   "add	pc, pc, r",10,-2,", asl r",11,1,"")
+#ifdef CONFIG_THUMB2_KERNEL
+	TEST_ARM_TO_THUMB_INTERWORK_R("add	pc, pc, r",0,3f-1f-8+1,"")
+	TEST_ARM_TO_THUMB_INTERWORK_R("sub	pc, r",0,3f+8+1,", #8")
+#endif
+	TEST_GROUP("Miscellaneous instructions")
+
+	TEST("mrs	r0, cpsr")
+	TEST("mrspl	r7, cpsr")
+	TEST("mrs	r14, cpsr")
+	TEST_UNSUPPORTED(".word 0xe10ff000	@ mrs r15, cpsr")
+	TEST_UNSUPPORTED("mrs	r0, spsr")
+	TEST_UNSUPPORTED("mrs	lr, spsr")
+
+	TEST_UNSUPPORTED("msr	cpsr, r0")
+	TEST_UNSUPPORTED("msr	cpsr_f, lr")
+	TEST_UNSUPPORTED("msr	spsr, r0")
+
+	TEST_BF_R("bx	r",0,2f,"")
+	TEST_BB_R("bx	r",7,2f,"")
+	TEST_BF_R("bxeq	r",14,2f,"")
+
+	TEST_R("clz	r0, r",0, 0x0,"")
+	TEST_R("clzeq	r7, r",14,0x1,"")
+	TEST_R("clz	lr, r",7, 0xffffffff,"")
+	TEST(  "clz	r4, sp")
+	TEST_UNSUPPORTED(".word 0x016fff10	@ clz pc, r0")
+	TEST_UNSUPPORTED(".word 0x016f0f1f	@ clz r0, pc")
+
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_UNSUPPORTED("bxj	r0")
+#endif
+
+	TEST_BF_R("blx	r",0,2f,"")
+	TEST_BB_R("blx	r",7,2f,"")
+	TEST_BF_R("blxeq	r",14,2f,"")
+	TEST_UNSUPPORTED(".word 0x0120003f	@ blx pc")
+
+	TEST_RR(   "qadd	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(   "qaddvs	lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_R(    "qadd	lr, r",9, VAL2,", r13")
+	TEST_RR(   "qsub	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(   "qsubvs	lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_R(    "qsub	lr, r",9, VAL2,", r13")
+	TEST_RR(   "qdadd	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(   "qdaddvs	lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_R(    "qdadd	lr, r",9, VAL2,", r13")
+	TEST_RR(   "qdsub	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(   "qdsubvs	lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_R(    "qdsub	lr, r",9, VAL2,", r13")
+	TEST_UNSUPPORTED(".word 0xe101f050	@ qadd pc, r0, r1")
+	TEST_UNSUPPORTED(".word 0xe121f050	@ qsub pc, r0, r1")
+	TEST_UNSUPPORTED(".word 0xe141f050	@ qdadd pc, r0, r1")
+	TEST_UNSUPPORTED(".word 0xe161f050	@ qdsub pc, r0, r1")
+	TEST_UNSUPPORTED(".word 0xe16f2050	@ qdsub r2, r0, pc")
+	TEST_UNSUPPORTED(".word 0xe161205f	@ qdsub r2, pc, r1")
+
+	TEST_UNSUPPORTED("bkpt	0xffff")
+	TEST_UNSUPPORTED("bkpt	0x0000")
+
+	TEST_UNSUPPORTED(".word 0xe1600070 @ smc #0")
+
+	TEST_GROUP("Halfword multiply and multiply-accumulate")
+
+	TEST_RRR(    "smlabb	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(    "smlabbge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(     "smlabb	lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe10f3281 @ smlabb pc, r1, r2, r3")
+	TEST_RRR(    "smlatb	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(    "smlatbge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(     "smlatb	lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe10f32a1 @ smlatb pc, r1, r2, r3")
+	TEST_RRR(    "smlabt	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(    "smlabtge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(     "smlabt	lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe10f32c1 @ smlabt pc, r1, r2, r3")
+	TEST_RRR(    "smlatt	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(    "smlattge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(     "smlatt	lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe10f32e1 @ smlatt pc, r1, r2, r3")
+
+	TEST_RRR(    "smlawb	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(    "smlawbge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(     "smlawb	lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe12f3281 @ smlawb pc, r1, r2, r3")
+	TEST_RRR(    "smlawt	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(    "smlawtge	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(     "smlawt	lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe12f32c1 @ smlawt pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe12032cf @ smlawt r0, pc, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe1203fc1 @ smlawt r0, r1, pc, r3")
+	TEST_UNSUPPORTED(".word 0xe120f2c1 @ smlawt r0, r1, r2, pc")
+
+	TEST_RR(    "smulwb	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smulwbge	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_R(     "smulwb	lr, r",1, VAL2,", r13")
+	TEST_UNSUPPORTED(".word 0xe12f02a1 @ smulwb pc, r1, r2")
+	TEST_RR(    "smulwt	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smulwtge	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_R(     "smulwt	lr, r",1, VAL2,", r13")
+	TEST_UNSUPPORTED(".word 0xe12f02e1 @ smulwt pc, r1, r2")
+
+	TEST_RRRR(  "smlalbb	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR(  "smlalbble	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRR(   "smlalbb	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+	TEST_UNSUPPORTED(".word 0xe14f1382 @ smlalbb pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe141f382 @ smlalbb r1, pc, r2, r3")
+	TEST_RRRR(  "smlaltb	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR(  "smlaltble	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRR(   "smlaltb	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+	TEST_UNSUPPORTED(".word 0xe14f13a2 @ smlaltb pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe141f3a2 @ smlaltb r1, pc, r2, r3")
+	TEST_RRRR(  "smlalbt	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR(  "smlalbtle	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRR(   "smlalbt	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+	TEST_UNSUPPORTED(".word 0xe14f13c2 @ smlalbt pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe141f3c2 @ smlalbt r1, pc, r2, r3")
+	TEST_RRRR(  "smlaltt	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR(  "smlalttle	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRR(   "smlaltt	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+	TEST_UNSUPPORTED(".word 0xe14f13e2 @ smlalbb pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe140f3e2 @ smlalbb r0, pc, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe14013ef @ smlalbb r0, r1, pc, r3")
+	TEST_UNSUPPORTED(".word 0xe1401fe2 @ smlalbb r0, r1, r2, pc")
+
+	TEST_RR(    "smulbb	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smulbbge	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_R(     "smulbb	lr, r",1, VAL2,", r13")
+	TEST_UNSUPPORTED(".word 0xe16f0281 @ smulbb pc, r1, r2")
+	TEST_RR(    "smultb	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smultbge	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_R(     "smultb	lr, r",1, VAL2,", r13")
+	TEST_UNSUPPORTED(".word 0xe16f02a1 @ smultb pc, r1, r2")
+	TEST_RR(    "smulbt	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smulbtge	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_R(     "smulbt	lr, r",1, VAL2,", r13")
+	TEST_UNSUPPORTED(".word 0xe16f02c1 @ smultb pc, r1, r2")
+	TEST_RR(    "smultt	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smulttge	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_R(     "smultt	lr, r",1, VAL2,", r13")
+	TEST_UNSUPPORTED(".word 0xe16f02e1 @ smultt pc, r1, r2")
+	TEST_UNSUPPORTED(".word 0xe16002ef @ smultt r0, pc, r2")
+	TEST_UNSUPPORTED(".word 0xe1600fe1 @ smultt r0, r1, pc")
+
+	TEST_GROUP("Multiply and multiply-accumulate")
+
+	TEST_RR(    "mul	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "mulls	r7, r",8, VAL2,", r",9, VAL2,"")
+	TEST_R(     "mul	lr, r",4, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe00f0291 @ mul pc, r1, r2")
+	TEST_UNSUPPORTED(".word 0xe000029f @ mul r0, pc, r2")
+	TEST_UNSUPPORTED(".word 0xe0000f91 @ mul r0, r1, pc")
+	TEST_RR(    "muls	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "mullss	r7, r",8, VAL2,", r",9, VAL2,"")
+	TEST_R(     "muls	lr, r",4, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe01f0291 @ muls pc, r1, r2")
+
+	TEST_RRR(    "mla	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(    "mlahi	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(     "mla	lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe02f3291 @ mla pc, r1, r2, r3")
+	TEST_RRR(    "mlas	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(    "mlahis	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(     "mlas	lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe03f3291 @ mlas pc, r1, r2, r3")
+
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_RR(  "umaal	r0, r1, r",2, VAL1,", r",3, VAL2,"")
+	TEST_RR(  "umaalls	r7, r8, r",9, VAL2,", r",10, VAL1,"")
+	TEST_R(   "umaal	lr, r12, r",11,VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe041f392 @ umaal pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe0500090 @ undef")
+	TEST_UNSUPPORTED(".word 0xe05fff9f @ undef")
+
+	TEST_RRR(  "mls		r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(  "mlshi	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(   "mls		lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe06f3291 @ mls pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe060329f @ mls r0, pc, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe0603f91 @ mls r0, r1, pc, r3")
+	TEST_UNSUPPORTED(".word 0xe060f291 @ mls r0, r1, r2, pc")
+#endif
+
+	TEST_UNSUPPORTED(".word 0xe0700090 @ undef")
+	TEST_UNSUPPORTED(".word 0xe07fff9f @ undef")
+
+	TEST_RR(  "umull	r0, r1, r",2, VAL1,", r",3, VAL2,"")
+	TEST_RR(  "umullls	r7, r8, r",9, VAL2,", r",10, VAL1,"")
+	TEST_R(   "umull	lr, r12, r",11,VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe081f392 @ umull pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe08f1392 @ umull r1, pc, r2, r3")
+	TEST_RR(  "umulls	r0, r1, r",2, VAL1,", r",3, VAL2,"")
+	TEST_RR(  "umulllss	r7, r8, r",9, VAL2,", r",10, VAL1,"")
+	TEST_R(   "umulls	lr, r12, r",11,VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe091f392 @ umulls pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe09f1392 @ umulls r1, pc, r2, r3")
+
+	TEST_RRRR(  "umlal	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR(  "umlalle	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRR(   "umlal	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+	TEST_UNSUPPORTED(".word 0xe0af1392 @ umlal pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe0a1f392 @ umlal r1, pc, r2, r3")
+	TEST_RRRR(  "umlals	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR(  "umlalles	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRR(   "umlals	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+	TEST_UNSUPPORTED(".word 0xe0bf1392 @ umlals pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe0b1f392 @ umlals r1, pc, r2, r3")
+
+	TEST_RR(  "smull	r0, r1, r",2, VAL1,", r",3, VAL2,"")
+	TEST_RR(  "smullls	r7, r8, r",9, VAL2,", r",10, VAL1,"")
+	TEST_R(   "smull	lr, r12, r",11,VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe0c1f392 @ smull pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe0cf1392 @ smull r1, pc, r2, r3")
+	TEST_RR(  "smulls	r0, r1, r",2, VAL1,", r",3, VAL2,"")
+	TEST_RR(  "smulllss	r7, r8, r",9, VAL2,", r",10, VAL1,"")
+	TEST_R(   "smulls	lr, r12, r",11,VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe0d1f392 @ smulls pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe0df1392 @ smulls r1, pc, r2, r3")
+
+	TEST_RRRR(  "smlal	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR(  "smlalle	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRR(   "smlal	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+	TEST_UNSUPPORTED(".word 0xe0ef1392 @ smlal pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe0e1f392 @ smlal r1, pc, r2, r3")
+	TEST_RRRR(  "smlals	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR(  "smlalles	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRR(   "smlals	r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+	TEST_UNSUPPORTED(".word 0xe0ff1392 @ smlals pc, r1, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe0f0f392 @ smlals r0, pc, r2, r3")
+	TEST_UNSUPPORTED(".word 0xe0f0139f @ smlals r0, r1, pc, r3")
+	TEST_UNSUPPORTED(".word 0xe0f01f92 @ smlals r0, r1, r2, pc")
+
+	TEST_GROUP("Synchronization primitives")
+
+#if __LINUX_ARM_ARCH__ < 6
+	TEST_RP("swp	lr, r",7,VAL2,", [r",8,0,"]")
+	TEST_R( "swpvs	r0, r",1,VAL1,", [sp]")
+	TEST_RP("swp	sp, r",14,VAL2,", [r",12,13*4,"]")
+#else
+	TEST_UNSUPPORTED(".word 0xe108e097 @ swp	lr, r7, [r8]")
+	TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs	r0, r1, [sp]")
+	TEST_UNSUPPORTED(".word 0xe10cd09e @ swp	sp, r14 [r12]")
+#endif
+	TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
+	TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
+	TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
+#if __LINUX_ARM_ARCH__ < 6
+	TEST_RP("swpb	lr, r",7,VAL2,", [r",8,0,"]")
+	TEST_R( "swpvsb	r0, r",1,VAL1,", [sp]")
+#else
+	TEST_UNSUPPORTED(".word 0xe148e097 @ swpb	lr, r7, [r8]")
+	TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb	r0, r1, [sp]")
+#endif
+	TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
+
+	TEST_UNSUPPORTED(".word	0xe1100090") /* Unallocated space */
+	TEST_UNSUPPORTED(".word	0xe1200090") /* Unallocated space */
+	TEST_UNSUPPORTED(".word	0xe1300090") /* Unallocated space */
+	TEST_UNSUPPORTED(".word	0xe1500090") /* Unallocated space */
+	TEST_UNSUPPORTED(".word	0xe1600090") /* Unallocated space */
+	TEST_UNSUPPORTED(".word	0xe1700090") /* Unallocated space */
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_UNSUPPORTED("ldrex	r2, [sp]")
+	TEST_UNSUPPORTED("strexd	r0, r2, r3, [sp]")
+	TEST_UNSUPPORTED("ldrexd	r2, r3, [sp]")
+	TEST_UNSUPPORTED("strexb	r0, r2, [sp]")
+	TEST_UNSUPPORTED("ldrexb	r2, [sp]")
+	TEST_UNSUPPORTED("strexh	r0, r2, [sp]")
+	TEST_UNSUPPORTED("ldrexh	r2, [sp]")
+#endif
+	TEST_GROUP("Extra load/store instructions")
+
+	TEST_RPR(  "strh	r",0, VAL1,", [r",1, 48,", -r",2, 24,"]")
+	TEST_RPR(  "streqh	r",14,VAL2,", [r",13,0, ", r",12, 48,"]")
+	TEST_RPR(  "strh	r",1, VAL1,", [r",2, 24,", r",3,  48,"]!")
+	TEST_RPR(  "strneh	r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
+	TEST_RPR(  "strh	r",2, VAL1,", [r",3, 24,"], r",4, 48,"")
+	TEST_RPR(  "strh	r",10,VAL2,", [r",9, 48,"], -r",11,24,"")
+	TEST_UNSUPPORTED(".word 0xe1afc0ba	@ strh r12, [pc, r10]!")
+	TEST_UNSUPPORTED(".word 0xe089f0bb	@ strh pc, [r9], r11")
+	TEST_UNSUPPORTED(".word 0xe089a0bf	@ strh r10, [r9], pc")
+
+	TEST_PR(   "ldrh	r0, [r",0,  48,", -r",2, 24,"]")
+	TEST_PR(   "ldrcsh	r14, [r",13,0, ", r",12, 48,"]")
+	TEST_PR(   "ldrh	r1, [r",2,  24,", r",3,  48,"]!")
+	TEST_PR(   "ldrcch	r12, [r",11,48,", -r",10,24,"]!")
+	TEST_PR(   "ldrh	r2, [r",3,  24,"], r",4, 48,"")
+	TEST_PR(   "ldrh	r10, [r",9, 48,"], -r",11,24,"")
+	TEST_UNSUPPORTED(".word 0xe1bfc0ba	@ ldrh r12, [pc, r10]!")
+	TEST_UNSUPPORTED(".word 0xe099f0bb	@ ldrh pc, [r9], r11")
+	TEST_UNSUPPORTED(".word 0xe099a0bf	@ ldrh r10, [r9], pc")
+
+	TEST_RP(   "strh	r",0, VAL1,", [r",1, 24,", #-2]")
+	TEST_RP(   "strmih	r",14,VAL2,", [r",13,0, ", #2]")
+	TEST_RP(   "strh	r",1, VAL1,", [r",2, 24,", #4]!")
+	TEST_RP(   "strplh	r",12,VAL2,", [r",11,24,", #-4]!")
+	TEST_RP(   "strh	r",2, VAL1,", [r",3, 24,"], #48")
+	TEST_RP(   "strh	r",10,VAL2,", [r",9, 64,"], #-48")
+	TEST_UNSUPPORTED(".word 0xe1efc3b0	@ strh r12, [pc, #48]!")
+	TEST_UNSUPPORTED(".word 0xe0c9f3b0	@ strh pc, [r9], #48")
+
+	TEST_P(	   "ldrh	r0, [r",0,  24,", #-2]")
+	TEST_P(	   "ldrvsh	r14, [r",13,0, ", #2]")
+	TEST_P(	   "ldrh	r1, [r",2,  24,", #4]!")
+	TEST_P(	   "ldrvch	r12, [r",11,24,", #-4]!")
+	TEST_P(	   "ldrh	r2, [r",3,  24,"], #48")
+	TEST_P(	   "ldrh	r10, [r",9, 64,"], #-48")
+	TEST(      "ldrh	r0, [pc, #0]")
+	TEST_UNSUPPORTED(".word 0xe1ffc3b0	@ ldrh r12, [pc, #48]!")
+	TEST_UNSUPPORTED(".word 0xe0d9f3b0	@ ldrh pc, [r9], #48")
+
+	TEST_PR(   "ldrsb	r0, [r",0,  48,", -r",2, 24,"]")
+	TEST_PR(   "ldrhisb	r14, [r",13,0,", r",12,  48,"]")
+	TEST_PR(   "ldrsb	r1, [r",2,  24,", r",3,  48,"]!")
+	TEST_PR(   "ldrlssb	r12, [r",11,48,", -r",10,24,"]!")
+	TEST_PR(   "ldrsb	r2, [r",3,  24,"], r",4, 48,"")
+	TEST_PR(   "ldrsb	r10, [r",9, 48,"], -r",11,24,"")
+	TEST_UNSUPPORTED(".word 0xe1bfc0da	@ ldrsb r12, [pc, r10]!")
+	TEST_UNSUPPORTED(".word 0xe099f0db	@ ldrsb pc, [r9], r11")
+
+	TEST_P(	   "ldrsb	r0, [r",0,  24,", #-1]")
+	TEST_P(	   "ldrgesb	r14, [r",13,0, ", #1]")
+	TEST_P(	   "ldrsb	r1, [r",2,  24,", #4]!")
+	TEST_P(	   "ldrltsb	r12, [r",11,24,", #-4]!")
+	TEST_P(	   "ldrsb	r2, [r",3,  24,"], #48")
+	TEST_P(	   "ldrsb	r10, [r",9, 64,"], #-48")
+	TEST(      "ldrsb	r0, [pc, #0]")
+	TEST_UNSUPPORTED(".word 0xe1ffc3d0	@ ldrsb r12, [pc, #48]!")
+	TEST_UNSUPPORTED(".word 0xe0d9f3d0	@ ldrsb pc, [r9], #48")
+
+	TEST_PR(   "ldrsh	r0, [r",0,  48,", -r",2, 24,"]")
+	TEST_PR(   "ldrgtsh	r14, [r",13,0, ", r",12, 48,"]")
+	TEST_PR(   "ldrsh	r1, [r",2,  24,", r",3,  48,"]!")
+	TEST_PR(   "ldrlesh	r12, [r",11,48,", -r",10,24,"]!")
+	TEST_PR(   "ldrsh	r2, [r",3,  24,"], r",4, 48,"")
+	TEST_PR(   "ldrsh	r10, [r",9, 48,"], -r",11,24,"")
+	TEST_UNSUPPORTED(".word 0xe1bfc0fa	@ ldrsh r12, [pc, r10]!")
+	TEST_UNSUPPORTED(".word 0xe099f0fb	@ ldrsh pc, [r9], r11")
+
+	TEST_P(	   "ldrsh	r0, [r",0,  24,", #-1]")
+	TEST_P(	   "ldreqsh	r14, [r",13,0 ,", #1]")
+	TEST_P(	   "ldrsh	r1, [r",2,  24,", #4]!")
+	TEST_P(	   "ldrnesh	r12, [r",11,24,", #-4]!")
+	TEST_P(	   "ldrsh	r2, [r",3,  24,"], #48")
+	TEST_P(	   "ldrsh	r10, [r",9, 64,"], #-48")
+	TEST(      "ldrsh	r0, [pc, #0]")
+	TEST_UNSUPPORTED(".word 0xe1ffc3f0	@ ldrsh r12, [pc, #48]!")
+	TEST_UNSUPPORTED(".word 0xe0d9f3f0	@ ldrsh pc, [r9], #48")
+
+#if __LINUX_ARM_ARCH__ >= 7
+	TEST_UNSUPPORTED("strht	r1, [r2], r3")
+	TEST_UNSUPPORTED("ldrht	r1, [r2], r3")
+	TEST_UNSUPPORTED("strht	r1, [r2], #48")
+	TEST_UNSUPPORTED("ldrht	r1, [r2], #48")
+	TEST_UNSUPPORTED("ldrsbt	r1, [r2], r3")
+	TEST_UNSUPPORTED("ldrsbt	r1, [r2], #48")
+	TEST_UNSUPPORTED("ldrsht	r1, [r2], r3")
+	TEST_UNSUPPORTED("ldrsht	r1, [r2], #48")
+#endif
+
+	TEST_RPR(  "strd	r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
+	TEST_RPR(  "strccd	r",8, VAL2,", [r",13,0, ", r",12,48,"]")
+	TEST_RPR(  "strd	r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
+	TEST_RPR(  "strcsd	r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
+	TEST_RPR(  "strd	r",2, VAL1,", [r",5, 24,"], r",4,48,"")
+	TEST_RPR(  "strd	r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
+	TEST_UNSUPPORTED(".word 0xe1afc0fa	@ strd r12, [pc, r10]!")
+
+	TEST_PR(   "ldrd	r0, [r",0, 48,", -r",2,24,"]")
+	TEST_PR(   "ldrmid	r8, [r",13,0, ", r",12,48,"]")
+	TEST_PR(   "ldrd	r4, [r",2, 24,", r",3, 48,"]!")
+	TEST_PR(   "ldrpld	r6, [r",11,48,", -r",10,24,"]!")
+	TEST_PR(   "ldrd	r2, [r",5, 24,"], r",4,48,"")
+	TEST_PR(   "ldrd	r10, [r",9,48,"], -r",7,24,"")
+	TEST_UNSUPPORTED(".word 0xe1afc0da	@ ldrd r12, [pc, r10]!")
+	TEST_UNSUPPORTED(".word 0xe089f0db	@ ldrd pc, [r9], r11")
+	TEST_UNSUPPORTED(".word 0xe089e0db	@ ldrd lr, [r9], r11")
+	TEST_UNSUPPORTED(".word 0xe089c0df	@ ldrd r12, [r9], pc")
+
+	TEST_RP(   "strd	r",0, VAL1,", [r",1, 24,", #-8]")
+	TEST_RP(   "strvsd	r",8, VAL2,", [r",13,0, ", #8]")
+	TEST_RP(   "strd	r",4, VAL1,", [r",2, 24,", #16]!")
+	TEST_RP(   "strvcd	r",12,VAL2,", [r",11,24,", #-16]!")
+	TEST_RP(   "strd	r",2, VAL1,", [r",4, 24,"], #48")
+	TEST_RP(   "strd	r",10,VAL2,", [r",9, 64,"], #-48")
+	TEST_UNSUPPORTED(".word 0xe1efc3f0	@ strd r12, [pc, #48]!")
+
+	TEST_P(	   "ldrd	r0, [r",0, 24,", #-8]")
+	TEST_P(	   "ldrhid	r8, [r",13,0, ", #8]")
+	TEST_P(	   "ldrd	r4, [r",2, 24,", #16]!")
+	TEST_P(	   "ldrlsd	r6, [r",11,24,", #-16]!")
+	TEST_P(	   "ldrd	r2, [r",5, 24,"], #48")
+	TEST_P(	   "ldrd	r10, [r",9,6,"], #-48")
+	TEST_UNSUPPORTED(".word 0xe1efc3d0	@ ldrd r12, [pc, #48]!")
+	TEST_UNSUPPORTED(".word 0xe0c9f3d0	@ ldrd pc, [r9], #48")
+	TEST_UNSUPPORTED(".word 0xe0c9e3d0	@ ldrd lr, [r9], #48")
+
+	TEST_GROUP("Miscellaneous")
+
+#if __LINUX_ARM_ARCH__ >= 7
+	TEST("movw	r0, #0")
+	TEST("movw	r0, #0xffff")
+	TEST("movw	lr, #0xffff")
+	TEST_UNSUPPORTED(".word 0xe300f000	@ movw pc, #0")
+	TEST_R("movt	r",0, VAL1,", #0")
+	TEST_R("movt	r",0, VAL2,", #0xffff")
+	TEST_R("movt	r",14,VAL1,", #0xffff")
+	TEST_UNSUPPORTED(".word 0xe340f000	@ movt pc, #0")
+#endif
+
+	TEST_UNSUPPORTED("msr	cpsr, 0x13")
+	TEST_UNSUPPORTED("msr	cpsr_f, 0xf0000000")
+	TEST_UNSUPPORTED("msr	spsr, 0x13")
+
+#if __LINUX_ARM_ARCH__ >= 7
+	TEST_SUPPORTED("yield")
+	TEST("sev")
+	TEST("nop")
+	TEST("wfi")
+	TEST_SUPPORTED("wfe")
+	TEST_UNSUPPORTED("dbg #0")
+#endif
+
+	TEST_GROUP("Load/store word and unsigned byte")
+
+#define LOAD_STORE(byte)							\
+	TEST_RP( "str"byte"	r",0, VAL1,", [r",1, 24,", #-2]")		\
+	TEST_RP( "str"byte"	r",14,VAL2,", [r",13,0, ", #2]")		\
+	TEST_RP( "str"byte"	r",1, VAL1,", [r",2, 24,", #4]!")		\
+	TEST_RP( "str"byte"	r",12,VAL2,", [r",11,24,", #-4]!")		\
+	TEST_RP( "str"byte"	r",2, VAL1,", [r",3, 24,"], #48")		\
+	TEST_RP( "str"byte"	r",10,VAL2,", [r",9, 64,"], #-48")		\
+	TEST_RPR("str"byte"	r",0, VAL1,", [r",1, 48,", -r",2, 24,"]")	\
+	TEST_RPR("str"byte"	r",14,VAL2,", [r",13,0, ", r",12, 48,"]")	\
+	TEST_RPR("str"byte"	r",1, VAL1,", [r",2, 24,", r",3,  48,"]!")	\
+	TEST_RPR("str"byte"	r",12,VAL2,", [r",11,48,", -r",10,24,"]!")	\
+	TEST_RPR("str"byte"	r",2, VAL1,", [r",3, 24,"], r",4, 48,"")	\
+	TEST_RPR("str"byte"	r",10,VAL2,", [r",9, 48,"], -r",11,24,"")	\
+	TEST_RPR("str"byte"	r",0, VAL1,", [r",1, 24,", r",2,  32,", asl #1]")\
+	TEST_RPR("str"byte"	r",14,VAL2,", [r",13,0, ", r",12, 32,", lsr #2]")\
+	TEST_RPR("str"byte"	r",1, VAL1,", [r",2, 24,", r",3,  32,", asr #3]!")\
+	TEST_RPR("str"byte"	r",12,VAL2,", [r",11,24,", r",10, 4,", ror #31]!")\
+	TEST_P(  "ldr"byte"	r0, [r",0,  24,", #-2]")			\
+	TEST_P(  "ldr"byte"	r14, [r",13,0, ", #2]")				\
+	TEST_P(  "ldr"byte"	r1, [r",2,  24,", #4]!")			\
+	TEST_P(  "ldr"byte"	r12, [r",11,24,", #-4]!")			\
+	TEST_P(  "ldr"byte"	r2, [r",3,  24,"], #48")			\
+	TEST_P(  "ldr"byte"	r10, [r",9, 64,"], #-48")			\
+	TEST_PR( "ldr"byte"	r0, [r",0,  48,", -r",2, 24,"]")		\
+	TEST_PR( "ldr"byte"	r14, [r",13,0, ", r",12, 48,"]")		\
+	TEST_PR( "ldr"byte"	r1, [r",2,  24,", r",3, 48,"]!")		\
+	TEST_PR( "ldr"byte"	r12, [r",11,48,", -r",10,24,"]!")		\
+	TEST_PR( "ldr"byte"	r2, [r",3,  24,"], r",4, 48,"")			\
+	TEST_PR( "ldr"byte"	r10, [r",9, 48,"], -r",11,24,"")		\
+	TEST_PR( "ldr"byte"	r0, [r",0,  24,", r",2,  32,", asl #1]")	\
+	TEST_PR( "ldr"byte"	r14, [r",13,0, ", r",12, 32,", lsr #2]")	\
+	TEST_PR( "ldr"byte"	r1, [r",2,  24,", r",3,  32,", asr #3]!")	\
+	TEST_PR( "ldr"byte"	r12, [r",11,24,", r",10, 4,", ror #31]!")	\
+	TEST(    "ldr"byte"	r0, [pc, #0]")					\
+	TEST_R(  "ldr"byte"	r12, [pc, r",14,0,"]")
+
+	LOAD_STORE("")
+	TEST_P(   "str	pc, [r",0,0,", #15*4]")
+	TEST_R(   "str	pc, [sp, r",2,15*4,"]")
+	TEST_BF(  "ldr	pc, [sp, #15*4]")
+	TEST_BF_R("ldr	pc, [sp, r",2,15*4,"]")
+
+	TEST_P(   "str	sp, [r",0,0,", #13*4]")
+	TEST_R(   "str	sp, [sp, r",2,13*4,"]")
+	TEST_BF(  "ldr	sp, [sp, #13*4]")
+	TEST_BF_R("ldr	sp, [sp, r",2,13*4,"]")
+
+#ifdef CONFIG_THUMB2_KERNEL
+	TEST_ARM_TO_THUMB_INTERWORK_P("ldr	pc, [r",0,0,", #15*4]")
+#endif
+	TEST_UNSUPPORTED(".word 0xe5af6008	@ str r6, [pc, #8]!")
+	TEST_UNSUPPORTED(".word 0xe7af6008	@ str r6, [pc, r8]!")
+	TEST_UNSUPPORTED(".word 0xe5bf6008	@ ldr r6, [pc, #8]!")
+	TEST_UNSUPPORTED(".word 0xe7bf6008	@ ldr r6, [pc, r8]!")
+	TEST_UNSUPPORTED(".word 0xe788600f	@ str r6, [r8, pc]")
+	TEST_UNSUPPORTED(".word 0xe798600f	@ ldr r6, [r8, pc]")
+
+	LOAD_STORE("b")
+	TEST_UNSUPPORTED(".word 0xe5f7f008	@ ldrb pc, [r7, #8]!")
+	TEST_UNSUPPORTED(".word 0xe7f7f008	@ ldrb pc, [r7, r8]!")
+	TEST_UNSUPPORTED(".word 0xe5ef6008	@ strb r6, [pc, #8]!")
+	TEST_UNSUPPORTED(".word 0xe7ef6008	@ strb r6, [pc, r3]!")
+	TEST_UNSUPPORTED(".word 0xe5ff6008	@ ldrb r6, [pc, #8]!")
+	TEST_UNSUPPORTED(".word 0xe7ff6008	@ ldrb r6, [pc, r3]!")
+
+	TEST_UNSUPPORTED("ldrt	r0, [r1], #4")
+	TEST_UNSUPPORTED("ldrt	r1, [r2], r3")
+	TEST_UNSUPPORTED("strt	r2, [r3], #4")
+	TEST_UNSUPPORTED("strt	r3, [r4], r5")
+	TEST_UNSUPPORTED("ldrbt	r4, [r5], #4")
+	TEST_UNSUPPORTED("ldrbt	r5, [r6], r7")
+	TEST_UNSUPPORTED("strbt	r6, [r7], #4")
+	TEST_UNSUPPORTED("strbt	r7, [r8], r9")
+
+#if __LINUX_ARM_ARCH__ >= 7
+	TEST_GROUP("Parallel addition and subtraction, signed")
+
+	TEST_UNSUPPORTED(".word 0xe6000010") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe60fffff") /* Unallocated space */
+
+	TEST_RR(    "sadd16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "sadd16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe61cff1a	@ sadd16	pc, r12, r10")
+	TEST_RR(    "sasx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "sasx	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe61cff3a	@ sasx	pc, r12, r10")
+	TEST_RR(    "ssax	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "ssax	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe61cff5a	@ ssax	pc, r12, r10")
+	TEST_RR(    "ssub16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "ssub16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe61cff7a	@ ssub16	pc, r12, r10")
+	TEST_RR(    "sadd8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "sadd8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe61cff9a	@ sadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(".word 0xe61000b0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe61fffbf") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe61000d0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe61fffdf") /* Unallocated space */
+	TEST_RR(    "ssub8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "ssub8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe61cfffa	@ ssub8	pc, r12, r10")
+
+	TEST_RR(    "qadd16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "qadd16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe62cff1a	@ qadd16	pc, r12, r10")
+	TEST_RR(    "qasx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "qasx	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe62cff3a	@ qasx	pc, r12, r10")
+	TEST_RR(    "qsax	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "qsax	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe62cff5a	@ qsax	pc, r12, r10")
+	TEST_RR(    "qsub16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "qsub16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe62cff7a	@ qsub16	pc, r12, r10")
+	TEST_RR(    "qadd8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "qadd8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe62cff9a	@ qadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(".word 0xe62000b0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe62fffbf") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe62000d0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe62fffdf") /* Unallocated space */
+	TEST_RR(    "qsub8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "qsub8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe62cfffa	@ qsub8	pc, r12, r10")
+
+	TEST_RR(    "shadd16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "shadd16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe63cff1a	@ shadd16	pc, r12, r10")
+	TEST_RR(    "shasx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "shasx	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe63cff3a	@ shasx	pc, r12, r10")
+	TEST_RR(    "shsax	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "shsax	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe63cff5a	@ shsax	pc, r12, r10")
+	TEST_RR(    "shsub16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "shsub16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe63cff7a	@ shsub16	pc, r12, r10")
+	TEST_RR(    "shadd8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "shadd8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe63cff9a	@ shadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(".word 0xe63000b0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe63fffbf") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe63000d0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe63fffdf") /* Unallocated space */
+	TEST_RR(    "shsub8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "shsub8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe63cfffa	@ shsub8	pc, r12, r10")
+
+	TEST_GROUP("Parallel addition and subtraction, unsigned")
+
+	TEST_UNSUPPORTED(".word 0xe6400010") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe64fffff") /* Unallocated space */
+
+	TEST_RR(    "uadd16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uadd16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe65cff1a	@ uadd16	pc, r12, r10")
+	TEST_RR(    "uasx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uasx	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe65cff3a	@ uasx	pc, r12, r10")
+	TEST_RR(    "usax	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "usax	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe65cff5a	@ usax	pc, r12, r10")
+	TEST_RR(    "usub16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "usub16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe65cff7a	@ usub16	pc, r12, r10")
+	TEST_RR(    "uadd8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uadd8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe65cff9a	@ uadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(".word 0xe65000b0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe65fffbf") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe65000d0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe65fffdf") /* Unallocated space */
+	TEST_RR(    "usub8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "usub8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe65cfffa	@ usub8	pc, r12, r10")
+
+	TEST_RR(    "uqadd16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uqadd16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe66cff1a	@ uqadd16	pc, r12, r10")
+	TEST_RR(    "uqasx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uqasx	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe66cff3a	@ uqasx	pc, r12, r10")
+	TEST_RR(    "uqsax	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uqsax	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe66cff5a	@ uqsax	pc, r12, r10")
+	TEST_RR(    "uqsub16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uqsub16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe66cff7a	@ uqsub16	pc, r12, r10")
+	TEST_RR(    "uqadd8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uqadd8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe66cff9a	@ uqadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(".word 0xe66000b0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe66fffbf") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe66000d0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe66fffdf") /* Unallocated space */
+	TEST_RR(    "uqsub8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uqsub8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe66cfffa	@ uqsub8	pc, r12, r10")
+
+	TEST_RR(    "uhadd16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uhadd16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe67cff1a	@ uhadd16	pc, r12, r10")
+	TEST_RR(    "uhasx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uhasx	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe67cff3a	@ uhasx	pc, r12, r10")
+	TEST_RR(    "uhsax	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uhsax	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe67cff5a	@ uhsax	pc, r12, r10")
+	TEST_RR(    "uhsub16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uhsub16	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe67cff7a	@ uhsub16	pc, r12, r10")
+	TEST_RR(    "uhadd8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uhadd8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe67cff9a	@ uhadd8	pc, r12, r10")
+	TEST_UNSUPPORTED(".word 0xe67000b0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe67fffbf") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe67000d0") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe67fffdf") /* Unallocated space */
+	TEST_RR(    "uhsub8	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uhsub8	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe67cfffa	@ uhsub8	pc, r12, r10")
+	TEST_UNSUPPORTED(".word 0xe67feffa	@ uhsub8	r14, pc, r10")
+	TEST_UNSUPPORTED(".word 0xe67cefff	@ uhsub8	r14, r12, pc")
+#endif /* __LINUX_ARM_ARCH__ >= 7 */
+
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_GROUP("Packing, unpacking, saturation, and reversal")
+
+	TEST_RR(    "pkhbt	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "pkhbt	r14,r",12, HH1,", r",10,HH2,", lsl #2")
+	TEST_UNSUPPORTED(".word 0xe68cf11a	@ pkhbt	pc, r12, r10, lsl #2")
+	TEST_RR(    "pkhtb	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "pkhtb	r14,r",12, HH1,", r",10,HH2,", asr #2")
+	TEST_UNSUPPORTED(".word 0xe68cf15a	@ pkhtb	pc, r12, r10, asr #2")
+	TEST_UNSUPPORTED(".word 0xe68fe15a	@ pkhtb	r14, pc, r10, asr #2")
+	TEST_UNSUPPORTED(".word 0xe68ce15f	@ pkhtb	r14, r12, pc, asr #2")
+	TEST_UNSUPPORTED(".word 0xe6900010") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe69fffdf") /* Unallocated space */
+
+	TEST_R(     "ssat	r0, #24, r",0,   VAL1,"")
+	TEST_R(     "ssat	r14, #24, r",12, VAL2,"")
+	TEST_R(     "ssat	r0, #24, r",0,   VAL1,", lsl #8")
+	TEST_R(     "ssat	r14, #24, r",12, VAL2,", asr #8")
+	TEST_UNSUPPORTED(".word 0xe6b7f01c	@ ssat	pc, #24, r12")
+
+	TEST_R(     "usat	r0, #24, r",0,   VAL1,"")
+	TEST_R(     "usat	r14, #24, r",12, VAL2,"")
+	TEST_R(     "usat	r0, #24, r",0,   VAL1,", lsl #8")
+	TEST_R(     "usat	r14, #24, r",12, VAL2,", asr #8")
+	TEST_UNSUPPORTED(".word 0xe6f7f01c	@ usat	pc, #24, r12")
+
+	TEST_RR(    "sxtab16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "sxtab16	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "sxtb16	r8, r",7,  HH1,"")
+	TEST_UNSUPPORTED(".word 0xe68cf47a	@ sxtab16	pc,r12, r10, ror #8")
+
+	TEST_RR(    "sel	r0, r",0,  VAL1,", r",1, VAL2,"")
+	TEST_RR(    "sel	r14, r",12,VAL1,", r",10, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe68cffba	@ sel	pc, r12, r10")
+	TEST_UNSUPPORTED(".word 0xe68fefba	@ sel	r14, pc, r10")
+	TEST_UNSUPPORTED(".word 0xe68cefbf	@ sel	r14, r12, pc")
+
+	TEST_R(     "ssat16	r0, #12, r",0,   HH1,"")
+	TEST_R(     "ssat16	r14, #12, r",12, HH2,"")
+	TEST_UNSUPPORTED(".word 0xe6abff3c	@ ssat16	pc, #12, r12")
+
+	TEST_RR(    "sxtab	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "sxtab	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "sxtb	r8, r",7,  HH1,"")
+	TEST_UNSUPPORTED(".word 0xe6acf47a	@ sxtab	pc,r12, r10, ror #8")
+
+	TEST_R(     "rev	r0, r",0,   VAL1,"")
+	TEST_R(     "rev	r14, r",12, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe6bfff3c	@ rev	pc, r12")
+
+	TEST_RR(    "sxtah	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "sxtah	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "sxth	r8, r",7,  HH1,"")
+	TEST_UNSUPPORTED(".word 0xe6bcf47a	@ sxtah	pc,r12, r10, ror #8")
+
+	TEST_R(     "rev16	r0, r",0,   VAL1,"")
+	TEST_R(     "rev16	r14, r",12, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe6bfffbc	@ rev16	pc, r12")
+
+	TEST_RR(    "uxtab16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uxtab16	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "uxtb16	r8, r",7,  HH1,"")
+	TEST_UNSUPPORTED(".word 0xe6ccf47a	@ uxtab16	pc,r12, r10, ror #8")
+
+	TEST_R(     "usat16	r0, #12, r",0,   HH1,"")
+	TEST_R(     "usat16	r14, #12, r",12, HH2,"")
+	TEST_UNSUPPORTED(".word 0xe6ecff3c	@ usat16	pc, #12, r12")
+	TEST_UNSUPPORTED(".word 0xe6ecef3f	@ usat16	r14, #12, pc")
+
+	TEST_RR(    "uxtab	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uxtab	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "uxtb	r8, r",7,  HH1,"")
+	TEST_UNSUPPORTED(".word 0xe6ecf47a	@ uxtab	pc,r12, r10, ror #8")
+
+#if __LINUX_ARM_ARCH__ >= 7
+	TEST_R(     "rbit	r0, r",0,   VAL1,"")
+	TEST_R(     "rbit	r14, r",12, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe6ffff3c	@ rbit	pc, r12")
+#endif
+
+	TEST_RR(    "uxtah	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uxtah	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "uxth	r8, r",7,  HH1,"")
+	TEST_UNSUPPORTED(".word 0xe6fff077	@ uxth	pc, r7")
+	TEST_UNSUPPORTED(".word 0xe6ff807f	@ uxth	r8, pc")
+	TEST_UNSUPPORTED(".word 0xe6fcf47a	@ uxtah	pc, r12, r10, ror #8")
+	TEST_UNSUPPORTED(".word 0xe6fce47f	@ uxtah	r14, r12, pc, ror #8")
+
+	TEST_R(     "revsh	r0, r",0,   VAL1,"")
+	TEST_R(     "revsh	r14, r",12, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe6ffff3c	@ revsh	pc, r12")
+	TEST_UNSUPPORTED(".word 0xe6ffef3f	@ revsh	r14, pc")
+
+	TEST_UNSUPPORTED(".word 0xe6900070") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe69fff7f") /* Unallocated space */
+
+	TEST_UNSUPPORTED(".word 0xe6d00070") /* Unallocated space */
+	TEST_UNSUPPORTED(".word 0xe6dfff7f") /* Unallocated space */
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_GROUP("Signed multiplies")
+
+	TEST_RRR(   "smlad	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
+	TEST_RRR(   "smlad	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe70f8a1c	@ smlad	pc, r12, r10, r8")
+	TEST_RRR(   "smladx	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
+	TEST_RRR(   "smladx	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe70f8a3c	@ smladx	pc, r12, r10, r8")
+
+	TEST_RR(   "smuad	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(   "smuad	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe70ffa1c	@ smuad	pc, r12, r10")
+	TEST_RR(   "smuadx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(   "smuadx	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe70ffa3c	@ smuadx	pc, r12, r10")
+
+	TEST_RRR(   "smlsd	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
+	TEST_RRR(   "smlsd	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe70f8a5c	@ smlsd	pc, r12, r10, r8")
+	TEST_RRR(   "smlsdx	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
+	TEST_RRR(   "smlsdx	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe70f8a7c	@ smlsdx	pc, r12, r10, r8")
+
+	TEST_RR(   "smusd	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(   "smusd	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe70ffa5c	@ smusd	pc, r12, r10")
+	TEST_RR(   "smusdx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(   "smusdx	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_UNSUPPORTED(".word 0xe70ffa7c	@ smusdx	pc, r12, r10")
+
+	TEST_RRRR( "smlald	r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+	TEST_RRRR( "smlald	r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+	TEST_UNSUPPORTED(".word 0xe74af819	@ smlald	pc, r10, r9, r8")
+	TEST_UNSUPPORTED(".word 0xe74fb819	@ smlald	r11, pc, r9, r8")
+	TEST_UNSUPPORTED(".word 0xe74ab81f	@ smlald	r11, r10, pc, r8")
+	TEST_UNSUPPORTED(".word 0xe74abf19	@ smlald	r11, r10, r9, pc")
+
+	TEST_RRRR( "smlaldx	r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+	TEST_RRRR( "smlaldx	r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+	TEST_UNSUPPORTED(".word 0xe74af839	@ smlaldx	pc, r10, r9, r8")
+	TEST_UNSUPPORTED(".word 0xe74fb839	@ smlaldx	r11, pc, r9, r8")
+
+	TEST_RRR(  "smmla	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
+	TEST_RRR(  "smmla	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe75f8a1c	@ smmla	pc, r12, r10, r8")
+	TEST_RRR(  "smmlar	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
+	TEST_RRR(  "smmlar	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe75f8a3c	@ smmlar	pc, r12, r10, r8")
+
+	TEST_RR(   "smmul	r0, r",0,  VAL1,", r",1, VAL2,"")
+	TEST_RR(   "smmul	r14, r",12,VAL2,", r",10,VAL1,"")
+	TEST_UNSUPPORTED(".word 0xe75ffa1c	@ smmul	pc, r12, r10")
+	TEST_RR(   "smmulr	r0, r",0,  VAL1,", r",1, VAL2,"")
+	TEST_RR(   "smmulr	r14, r",12,VAL2,", r",10,VAL1,"")
+	TEST_UNSUPPORTED(".word 0xe75ffa3c	@ smmulr	pc, r12, r10")
+
+	TEST_RRR(  "smmls	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
+	TEST_RRR(  "smmls	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe75f8adc	@ smmls	pc, r12, r10, r8")
+	TEST_RRR(  "smmlsr	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
+	TEST_RRR(  "smmlsr	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+	TEST_UNSUPPORTED(".word 0xe75f8afc	@ smmlsr	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(".word 0xe75e8aff	@ smmlsr	r14, pc, r10, r8")
+	TEST_UNSUPPORTED(".word 0xe75e8ffc	@ smmlsr	r14, r12, pc, r8")
+	TEST_UNSUPPORTED(".word 0xe75efafc	@ smmlsr	r14, r12, r10, pc")
+
+	TEST_RR(   "usad8	r0, r",0,  VAL1,", r",1, VAL2,"")
+	TEST_RR(   "usad8	r14, r",12,VAL2,", r",10,VAL1,"")
+	TEST_UNSUPPORTED(".word 0xe75ffa1c	@ usad8	pc, r12, r10")
+	TEST_UNSUPPORTED(".word 0xe75efa1f	@ usad8	r14, pc, r10")
+	TEST_UNSUPPORTED(".word 0xe75eff1c	@ usad8	r14, r12, pc")
+
+	TEST_RRR(  "usada8	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL3,"")
+	TEST_RRR(  "usada8	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
+	TEST_UNSUPPORTED(".word 0xe78f8a1c	@ usada8	pc, r12, r10, r8")
+	TEST_UNSUPPORTED(".word 0xe78e8a1f	@ usada8	r14, pc, r10, r8")
+	TEST_UNSUPPORTED(".word 0xe78e8f1c	@ usada8	r14, r12, pc, r8")
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#if __LINUX_ARM_ARCH__ >= 7
+	TEST_GROUP("Bit Field")
+
+	TEST_R(     "sbfx	r0, r",0  , VAL1,", #0, #31")
+	TEST_R(     "sbfxeq	r14, r",12, VAL2,", #8, #16")
+	TEST_R(     "sbfx	r4, r",10,  VAL1,", #16, #15")
+	TEST_UNSUPPORTED(".word 0xe7aff45c	@ sbfx	pc, r12, #8, #16")
+
+	TEST_R(     "ubfx	r0, r",0  , VAL1,", #0, #31")
+	TEST_R(     "ubfxcs	r14, r",12, VAL2,", #8, #16")
+	TEST_R(     "ubfx	r4, r",10,  VAL1,", #16, #15")
+	TEST_UNSUPPORTED(".word 0xe7eff45c	@ ubfx	pc, r12, #8, #16")
+	TEST_UNSUPPORTED(".word 0xe7efc45f	@ ubfx	r12, pc, #8, #16")
+
+	TEST_R(     "bfc	r",0, VAL1,", #4, #20")
+	TEST_R(     "bfcvs	r",14,VAL2,", #4, #20")
+	TEST_R(     "bfc	r",7, VAL1,", #0, #31")
+	TEST_R(     "bfc	r",8, VAL2,", #0, #31")
+	TEST_UNSUPPORTED(".word 0xe7def01f	@ bfc	pc, #0, #31");
+
+	TEST_RR(    "bfi	r",0, VAL1,", r",0  , VAL2,", #0, #31")
+	TEST_RR(    "bfipl	r",12,VAL1,", r",14 , VAL2,", #4, #20")
+	TEST_UNSUPPORTED(".word 0xe7d7f21e	@ bfi	pc, r14, #4, #20")
+
+	TEST_UNSUPPORTED(".word 0x07f000f0")  /* Permanently UNDEFINED */
+	TEST_UNSUPPORTED(".word 0x07ffffff")  /* Permanently UNDEFINED */
+#endif /* __LINUX_ARM_ARCH__ >= 7 */
+
+	TEST_GROUP("Branch, branch with link, and block data transfer")
+
+	TEST_P(   "stmda	r",0, 16*4,", {r0}")
+	TEST_P(   "stmeqda	r",4, 16*4,", {r0-r15}")
+	TEST_P(   "stmneda	r",8, 16*4,"!, {r8-r15}")
+	TEST_P(   "stmda	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_P(   "stmda	r",13,0,   "!, {pc}")
+
+	TEST_P(   "ldmda	r",0, 16*4,", {r0}")
+	TEST_BF_P("ldmcsda	r",4, 15*4,", {r0-r15}")
+	TEST_BF_P("ldmccda	r",7, 15*4,"!, {r8-r15}")
+	TEST_P(   "ldmda	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_BF_P("ldmda	r",14,15*4,"!, {pc}")
+
+	TEST_P(   "stmia	r",0, 16*4,", {r0}")
+	TEST_P(   "stmmiia	r",4, 16*4,", {r0-r15}")
+	TEST_P(   "stmplia	r",8, 16*4,"!, {r8-r15}")
+	TEST_P(   "stmia	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_P(   "stmia	r",14,0,   "!, {pc}")
+
+	TEST_P(   "ldmia	r",0, 16*4,", {r0}")
+	TEST_BF_P("ldmvsia	r",4, 0,   ", {r0-r15}")
+	TEST_BF_P("ldmvcia	r",7, 8*4, "!, {r8-r15}")
+	TEST_P(   "ldmia	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_BF_P("ldmia	r",14,15*4,"!, {pc}")
+
+	TEST_P(   "stmdb	r",0, 16*4,", {r0}")
+	TEST_P(   "stmhidb	r",4, 16*4,", {r0-r15}")
+	TEST_P(   "stmlsdb	r",8, 16*4,"!, {r8-r15}")
+	TEST_P(   "stmdb	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_P(   "stmdb	r",13,4,   "!, {pc}")
+
+	TEST_P(   "ldmdb	r",0, 16*4,", {r0}")
+	TEST_BF_P("ldmgedb	r",4, 16*4,", {r0-r15}")
+	TEST_BF_P("ldmltdb	r",7, 16*4,"!, {r8-r15}")
+	TEST_P(   "ldmdb	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_BF_P("ldmdb	r",14,16*4,"!, {pc}")
+
+	TEST_P(   "stmib	r",0, 16*4,", {r0}")
+	TEST_P(   "stmgtib	r",4, 16*4,", {r0-r15}")
+	TEST_P(   "stmleib	r",8, 16*4,"!, {r8-r15}")
+	TEST_P(   "stmib	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_P(   "stmib	r",13,-4,  "!, {pc}")
+
+	TEST_P(   "ldmib	r",0, 16*4,", {r0}")
+	TEST_BF_P("ldmeqib	r",4, -4,", {r0-r15}")
+	TEST_BF_P("ldmneib	r",7, 7*4,"!, {r8-r15}")
+	TEST_P(   "ldmib	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_BF_P("ldmib	r",14,14*4,"!, {pc}")
+
+	TEST_P(   "stmdb	r",13,16*4,"!, {r3-r12,lr}")
+	TEST_P(	  "stmeqdb	r",13,16*4,"!, {r3-r12}")
+	TEST_P(   "stmnedb	r",2, 16*4,", {r3-r12,lr}")
+	TEST_P(   "stmdb	r",13,16*4,"!, {r2-r12,lr}")
+	TEST_P(   "stmdb	r",0, 16*4,", {r0-r12}")
+	TEST_P(   "stmdb	r",0, 16*4,", {r0-r12,lr}")
+
+	TEST_BF_P("ldmia	r",13,5*4, "!, {r3-r12,pc}")
+	TEST_P(	  "ldmccia	r",13,5*4, "!, {r3-r12}")
+	TEST_BF_P("ldmcsia	r",2, 5*4, "!, {r3-r12,pc}")
+	TEST_BF_P("ldmia	r",13,4*4, "!, {r2-r12,pc}")
+	TEST_P(   "ldmia	r",0, 16*4,", {r0-r12}")
+	TEST_P(   "ldmia	r",0, 16*4,", {r0-r12,lr}")
+
+#ifdef CONFIG_THUMB2_KERNEL
+	TEST_ARM_TO_THUMB_INTERWORK_P("ldmplia	r",0,15*4,", {pc}")
+	TEST_ARM_TO_THUMB_INTERWORK_P("ldmmiia	r",13,0,", {r0-r15}")
+#endif
+	TEST_BF("b	2f")
+	TEST_BF("bl	2f")
+	TEST_BB("b	2b")
+	TEST_BB("bl	2b")
+
+	TEST_BF("beq	2f")
+	TEST_BF("bleq	2f")
+	TEST_BB("bne	2b")
+	TEST_BB("blne	2b")
+
+	TEST_BF("bgt	2f")
+	TEST_BF("blgt	2f")
+	TEST_BB("blt	2b")
+	TEST_BB("bllt	2b")
+
+	TEST_GROUP("Supervisor Call, and coprocessor instructions")
+
+	/*
+	 * We can't really test these by executing them, so all
+	 * we can do is check that probes are, or are not, allowed.
+	 * At the moment none are allowed...
+	 */
+#define TEST_COPROCESSOR(code) TEST_UNSUPPORTED(code)
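+/*
+ * i.e. every TEST_COPROCESSOR() case below merely verifies that placing
+ * a probe on the instruction is refused; nothing is executed.
+ */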
+
+#define COPROCESSOR_INSTRUCTIONS_ST_LD(two,cc)					\
+	TEST_COPROCESSOR("stc"two"	0, cr0, [r13, #4]")			\
+	TEST_COPROCESSOR("stc"two"	0, cr0, [r13, #-4]")			\
+	TEST_COPROCESSOR("stc"two"	0, cr0, [r13, #4]!")			\
+	TEST_COPROCESSOR("stc"two"	0, cr0, [r13, #-4]!")			\
+	TEST_COPROCESSOR("stc"two"	0, cr0, [r13], #4")			\
+	TEST_COPROCESSOR("stc"two"	0, cr0, [r13], #-4")			\
+	TEST_COPROCESSOR("stc"two"	0, cr0, [r13], {1}")			\
+	TEST_COPROCESSOR("stc"two"l	0, cr0, [r13, #4]")			\
+	TEST_COPROCESSOR("stc"two"l	0, cr0, [r13, #-4]")			\
+	TEST_COPROCESSOR("stc"two"l	0, cr0, [r13, #4]!")			\
+	TEST_COPROCESSOR("stc"two"l	0, cr0, [r13, #-4]!")			\
+	TEST_COPROCESSOR("stc"two"l	0, cr0, [r13], #4")			\
+	TEST_COPROCESSOR("stc"two"l	0, cr0, [r13], #-4")			\
+	TEST_COPROCESSOR("stc"two"l	0, cr0, [r13], {1}")			\
+	TEST_COPROCESSOR("ldc"two"	0, cr0, [r13, #4]")			\
+	TEST_COPROCESSOR("ldc"two"	0, cr0, [r13, #-4]")			\
+	TEST_COPROCESSOR("ldc"two"	0, cr0, [r13, #4]!")			\
+	TEST_COPROCESSOR("ldc"two"	0, cr0, [r13, #-4]!")			\
+	TEST_COPROCESSOR("ldc"two"	0, cr0, [r13], #4")			\
+	TEST_COPROCESSOR("ldc"two"	0, cr0, [r13], #-4")			\
+	TEST_COPROCESSOR("ldc"two"	0, cr0, [r13], {1}")			\
+	TEST_COPROCESSOR("ldc"two"l	0, cr0, [r13, #4]")			\
+	TEST_COPROCESSOR("ldc"two"l	0, cr0, [r13, #-4]")			\
+	TEST_COPROCESSOR("ldc"two"l	0, cr0, [r13, #4]!")			\
+	TEST_COPROCESSOR("ldc"two"l	0, cr0, [r13, #-4]!")			\
+	TEST_COPROCESSOR("ldc"two"l	0, cr0, [r13], #4")			\
+	TEST_COPROCESSOR("ldc"two"l	0, cr0, [r13], #-4")			\
+	TEST_COPROCESSOR("ldc"two"l	0, cr0, [r13], {1}")			\
+										\
+	TEST_COPROCESSOR( "stc"two"	0, cr0, [r15, #4]")			\
+	TEST_COPROCESSOR( "stc"two"	0, cr0, [r15, #-4]")			\
+	TEST_UNSUPPORTED(".word 0x"cc"daf0001	@ stc"two"	0, cr0, [r15, #4]!")	\
+	TEST_UNSUPPORTED(".word 0x"cc"d2f0001	@ stc"two"	0, cr0, [r15, #-4]!")	\
+	TEST_UNSUPPORTED(".word 0x"cc"caf0001	@ stc"two"	0, cr0, [r15], #4")	\
+	TEST_UNSUPPORTED(".word 0x"cc"c2f0001	@ stc"two"	0, cr0, [r15], #-4")	\
+	TEST_COPROCESSOR( "stc"two"	0, cr0, [r15], {1}")			\
+	TEST_COPROCESSOR( "stc"two"l	0, cr0, [r15, #4]")			\
+	TEST_COPROCESSOR( "stc"two"l	0, cr0, [r15, #-4]")			\
+	TEST_UNSUPPORTED(".word 0x"cc"def0001	@ stc"two"l	0, cr0, [r15, #4]!")	\
+	TEST_UNSUPPORTED(".word 0x"cc"d6f0001	@ stc"two"l	0, cr0, [r15, #-4]!")	\
+	TEST_UNSUPPORTED(".word 0x"cc"cef0001	@ stc"two"l	0, cr0, [r15], #4")	\
+	TEST_UNSUPPORTED(".word 0x"cc"c6f0001	@ stc"two"l	0, cr0, [r15], #-4")	\
+	TEST_COPROCESSOR( "stc"two"l	0, cr0, [r15], {1}")			\
+	TEST_COPROCESSOR( "ldc"two"	0, cr0, [r15, #4]")			\
+	TEST_COPROCESSOR( "ldc"two"	0, cr0, [r15, #-4]")			\
+	TEST_UNSUPPORTED(".word 0x"cc"dbf0001	@ ldc"two"	0, cr0, [r15, #4]!")	\
+	TEST_UNSUPPORTED(".word 0x"cc"d3f0001	@ ldc"two"	0, cr0, [r15, #-4]!")	\
+	TEST_UNSUPPORTED(".word 0x"cc"cbf0001	@ ldc"two"	0, cr0, [r15], #4")	\
+	TEST_UNSUPPORTED(".word 0x"cc"c3f0001	@ ldc"two"	0, cr0, [r15], #-4")	\
+	TEST_COPROCESSOR( "ldc"two"	0, cr0, [r15], {1}")			\
+	TEST_COPROCESSOR( "ldc"two"l	0, cr0, [r15, #4]")			\
+	TEST_COPROCESSOR( "ldc"two"l	0, cr0, [r15, #-4]")			\
+	TEST_UNSUPPORTED(".word 0x"cc"dff0001	@ ldc"two"l	0, cr0, [r15, #4]!")	\
+	TEST_UNSUPPORTED(".word 0x"cc"d7f0001	@ ldc"two"l	0, cr0, [r15, #-4]!")	\
+	TEST_UNSUPPORTED(".word 0x"cc"cff0001	@ ldc"two"l	0, cr0, [r15], #4")	\
+	TEST_UNSUPPORTED(".word 0x"cc"c7f0001	@ ldc"two"l	0, cr0, [r15], #-4")	\
+	TEST_COPROCESSOR( "ldc"two"l	0, cr0, [r15], {1}")
+
+#define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc)					\
+										\
+	TEST_COPROCESSOR( "mcrr"two"	0, 15, r0, r14, cr0")			\
+	TEST_COPROCESSOR( "mcrr"two"	15, 0, r14, r0, cr15")			\
+	TEST_UNSUPPORTED(".word 0x"cc"c4f00f0	@ mcrr"two"	0, 15, r0, r15, cr0")	\
+	TEST_UNSUPPORTED(".word 0x"cc"c40ff0f	@ mcrr"two"	15, 0, r15, r0, cr15")	\
+	TEST_COPROCESSOR( "mrrc"two"	0, 15, r0, r14, cr0")			\
+	TEST_COPROCESSOR( "mrrc"two"	15, 0, r14, r0, cr15")			\
+	TEST_UNSUPPORTED(".word 0x"cc"c5f00f0	@ mrrc"two"	0, 15, r0, r15, cr0")	\
+	TEST_UNSUPPORTED(".word 0x"cc"c50ff0f	@ mrrc"two"	15, 0, r15, r0, cr15")	\
+	TEST_COPROCESSOR( "cdp"two"	15, 15, cr15, cr15, cr15, 7")		\
+	TEST_COPROCESSOR( "cdp"two"	0, 0, cr0, cr0, cr0, 0")		\
+	TEST_COPROCESSOR( "mcr"two"	15, 7, r15, cr15, cr15, 7")		\
+	TEST_COPROCESSOR( "mcr"two"	0, 0, r0, cr0, cr0, 0")			\
+	TEST_COPROCESSOR( "mrc"two"	15, 7, r15, cr15, cr15, 7")		\
+	TEST_COPROCESSOR( "mrc"two"	0, 0, r0, cr0, cr0, 0")
+
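+/*
+ * In the two macros above, "two" selects between the conditional
+ * mnemonics (stc/ldc/mcr/... for two="") and their unconditional *2
+ * forms (stc2/ldc2/mcr2/... for two="2"), while "cc" is the matching
+ * condition nibble spliced into the hand-encoded words: "e" (AL) pairs
+ * with the conditional forms and "f" with the unconditional ones, so
+ * e.g. ".word 0x"cc"daf0001" becomes 0xedaf0001 or 0xfdaf0001.
+ */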
+	COPROCESSOR_INSTRUCTIONS_ST_LD("","e")
+	COPROCESSOR_INSTRUCTIONS_MC_MR("","e")
+	TEST_UNSUPPORTED("svc	0")
+	TEST_UNSUPPORTED("svc	0xffffff")
+
+	TEST_GROUP("Unconditional instruction")
+
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_UNSUPPORTED("srsda	sp, 0x13")
+	TEST_UNSUPPORTED("srsdb	sp, 0x13")
+	TEST_UNSUPPORTED("srsia	sp, 0x13")
+	TEST_UNSUPPORTED("srsib	sp, 0x13")
+	TEST_UNSUPPORTED("srsda	sp!, 0x13")
+	TEST_UNSUPPORTED("srsdb	sp!, 0x13")
+	TEST_UNSUPPORTED("srsia	sp!, 0x13")
+	TEST_UNSUPPORTED("srsib	sp!, 0x13")
+
+	TEST_UNSUPPORTED("rfeda	sp")
+	TEST_UNSUPPORTED("rfedb	sp")
+	TEST_UNSUPPORTED("rfeia	sp")
+	TEST_UNSUPPORTED("rfeib	sp")
+	TEST_UNSUPPORTED("rfeda	sp!")
+	TEST_UNSUPPORTED("rfedb	sp!")
+	TEST_UNSUPPORTED("rfeia	sp!")
+	TEST_UNSUPPORTED("rfeib	sp!")
+	TEST_UNSUPPORTED(".word 0xf81d0a00	@ rfeda	pc")
+	TEST_UNSUPPORTED(".word 0xf91d0a00	@ rfedb	pc")
+	TEST_UNSUPPORTED(".word 0xf89d0a00	@ rfeia	pc")
+	TEST_UNSUPPORTED(".word 0xf99d0a00	@ rfeib	pc")
+	TEST_UNSUPPORTED(".word 0xf83d0a00	@ rfeda	pc!")
+	TEST_UNSUPPORTED(".word 0xf93d0a00	@ rfedb	pc!")
+	TEST_UNSUPPORTED(".word 0xf8bd0a00	@ rfeia	pc!")
+	TEST_UNSUPPORTED(".word 0xf9bd0a00	@ rfeib	pc!")
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_X(	"blx	__dummy_thumb_subroutine_even",
+		".thumb				\n\t"
+		".space 4			\n\t"
+		".type __dummy_thumb_subroutine_even, %%function \n\t"
+		"__dummy_thumb_subroutine_even:	\n\t"
+		"mov	r0, pc			\n\t"
+		"bx	lr			\n\t"
+		".arm				\n\t"
+	)
+	TEST(	"blx	__dummy_thumb_subroutine_even")
+
+	TEST_X(	"blx	__dummy_thumb_subroutine_odd",
+		".thumb				\n\t"
+		".space 2			\n\t"
+		".type __dummy_thumb_subroutine_odd, %%function	\n\t"
+		"__dummy_thumb_subroutine_odd:	\n\t"
+		"mov	r0, pc			\n\t"
+		"bx	lr			\n\t"
+		".arm				\n\t"
+	)
+	TEST(	"blx	__dummy_thumb_subroutine_odd")
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+	COPROCESSOR_INSTRUCTIONS_ST_LD("2","f")
+#if __LINUX_ARM_ARCH__ >= 6
+	COPROCESSOR_INSTRUCTIONS_MC_MR("2","f")
+#endif
+
+	TEST_GROUP("Miscellaneous instructions, memory hints, and Advanced SIMD instructions")
+
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_UNSUPPORTED("cps	0x13")
+	TEST_UNSUPPORTED("cpsie	i")
+	TEST_UNSUPPORTED("cpsid	i")
+	TEST_UNSUPPORTED("cpsie	i,0x13")
+	TEST_UNSUPPORTED("cpsid	i,0x13")
+	TEST_UNSUPPORTED("setend	le")
+	TEST_UNSUPPORTED("setend	be")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+	TEST_P("pli	[r",0,0b,", #16]")
+	TEST(  "pli	[pc, #0]")
+	TEST_RR("pli	[r",12,0b,", r",0, 16,"]")
+	TEST_RR("pli	[r",0, 0b,", -r",12,16,", lsl #4]")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 5
+	TEST_P("pld	[r",0,32,", #-16]")
+	TEST(  "pld	[pc, #0]")
+	TEST_PR("pld	[r",7, 24, ", r",0, 16,"]")
+	TEST_PR("pld	[r",8, 24, ", -r",12,16,", lsl #4]")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+	TEST_SUPPORTED(  ".word 0xf590f000	@ pldw [r0, #0]")
+	TEST_SUPPORTED(  ".word 0xf797f000	@ pldw	[r7, r0]")
+	TEST_SUPPORTED(  ".word 0xf798f18c	@ pldw	[r8, r12, lsl #3]");
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+	TEST_UNSUPPORTED("clrex")
+	TEST_UNSUPPORTED("dsb")
+	TEST_UNSUPPORTED("dmb")
+	TEST_UNSUPPORTED("isb")
+#endif
+
+	verbose("\n");
+}
+
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test-thumb.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test-thumb.c
new file mode 100644
index 0000000..5d8b857
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test-thumb.c
@@ -0,0 +1,1187 @@
+/*
+ * arch/arm/kernel/kprobes-test-thumb.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "kprobes-test.h"
+
+
+#define TEST_ISA "16"
+
+#define DONT_TEST_IN_ITBLOCK(tests)			\
+	kprobe_test_flags |= TEST_FLAG_NO_ITBLOCK;	\
+	tests						\
+	kprobe_test_flags &= ~TEST_FLAG_NO_ITBLOCK;
+
+#define CONDITION_INSTRUCTIONS(cc_pos, tests)		\
+	kprobe_test_cc_position = cc_pos;		\
+	DONT_TEST_IN_ITBLOCK(tests)			\
+	kprobe_test_cc_position = 0;
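+
+/*
+ * kprobe_test_cc_position presumably tells the framework where the
+ * condition-code field sits in the instruction encoding: bit 8 for the
+ * 16-bit conditional branches and bit 22 for the 32-bit ones, as used
+ * further below.
+ */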
+
+#define TEST_ITBLOCK(code)				\
+	kprobe_test_flags |= TEST_FLAG_FULL_ITBLOCK;	\
+	TESTCASE_START(code)				\
+	TEST_ARG_END("")				\
+	"50:	nop			\n\t"		\
+	"1:	"code"			\n\t"		\
+	"	mov r1, #0x11		\n\t"		\
+	"	mov r2, #0x22		\n\t"		\
+	"	mov r3, #0x33		\n\t"		\
+	"2:	nop			\n\t"		\
+	TESTCASE_END					\
+	kprobe_test_flags &= ~TEST_FLAG_FULL_ITBLOCK;
+
+#define TEST_THUMB_TO_ARM_INTERWORK_P(code1, reg, val, code2)	\
+	TESTCASE_START(code1 #reg code2)			\
+	TEST_ARG_PTR(reg, val)					\
+	TEST_ARG_REG(14, 99f+1)					\
+	TEST_ARG_MEM(15, 3f)					\
+	TEST_ARG_END("")					\
+	"	nop			\n\t" /* To align 1f */	\
+	"50:	nop			\n\t"			\
+	"1:	"code1 #reg code2"	\n\t"			\
+	"	bx	lr		\n\t"			\
+	".arm				\n\t"			\
+	"3:	adr	lr, 2f+1	\n\t"			\
+	"	bx	lr		\n\t"			\
+	".thumb				\n\t"			\
+	"2:	nop			\n\t"			\
+	TESTCASE_END
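+
+/*
+ * TEST_THUMB_TO_ARM_INTERWORK_P() checks Thumb-to-ARM interworking: the
+ * memory word that the probed instruction loads into pc is primed (via
+ * TEST_ARG_MEM(15, 3f)) with the address of the ARM code at label 3:,
+ * which then bounces back to Thumb state with "adr lr, 2f+1; bx lr".
+ */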
+
+
+void kprobe_thumb16_test_cases(void)
+{
+	kprobe_test_flags = TEST_FLAG_NARROW_INSTR;
+
+	TEST_GROUP("Shift (immediate), add, subtract, move, and compare")
+
+	TEST_R(    "lsls	r7, r",0,VAL1,", #5")
+	TEST_R(    "lsls	r0, r",7,VAL2,", #11")
+	TEST_R(    "lsrs	r7, r",0,VAL1,", #5")
+	TEST_R(    "lsrs	r0, r",7,VAL2,", #11")
+	TEST_R(    "asrs	r7, r",0,VAL1,", #5")
+	TEST_R(    "asrs	r0, r",7,VAL2,", #11")
+	TEST_RR(   "adds	r2, r",0,VAL1,", r",7,VAL2,"")
+	TEST_RR(   "adds	r5, r",7,VAL2,", r",0,VAL2,"")
+	TEST_RR(   "subs	r2, r",0,VAL1,", r",7,VAL2,"")
+	TEST_RR(   "subs	r5, r",7,VAL2,", r",0,VAL2,"")
+	TEST_R(    "adds	r7, r",0,VAL1,", #5")
+	TEST_R(    "adds	r0, r",7,VAL2,", #2")
+	TEST_R(    "subs	r7, r",0,VAL1,", #5")
+	TEST_R(    "subs	r0, r",7,VAL2,", #2")
+	TEST(      "movs.n	r0, #0x5f")
+	TEST(      "movs.n	r7, #0xa0")
+	TEST_R(    "cmp.n	r",0,0x5e, ", #0x5f")
+	TEST_R(    "cmp.n	r",5,0x15f,", #0x5f")
+	TEST_R(    "cmp.n	r",7,0xa0, ", #0xa0")
+	TEST_R(    "adds.n	r",0,VAL1,", #0x5f")
+	TEST_R(    "adds.n	r",7,VAL2,", #0xa0")
+	TEST_R(    "subs.n	r",0,VAL1,", #0x5f")
+	TEST_R(    "subs.n	r",7,VAL2,", #0xa0")
+
+	TEST_GROUP("16-bit Thumb data-processing instructions")
+
+#define DATA_PROCESSING16(op,val)			\
+	TEST_RR(   op"	r",0,VAL1,", r",7,val,"")	\
+	TEST_RR(   op"	r",7,VAL2,", r",0,val,"")
+
+	DATA_PROCESSING16("ands",0xf00f00ff)
+	DATA_PROCESSING16("eors",0xf00f00ff)
+	DATA_PROCESSING16("lsls",11)
+	DATA_PROCESSING16("lsrs",11)
+	DATA_PROCESSING16("asrs",11)
+	DATA_PROCESSING16("adcs",VAL2)
+	DATA_PROCESSING16("sbcs",VAL2)
+	DATA_PROCESSING16("rors",11)
+	DATA_PROCESSING16("tst",0xf00f00ff)
+	TEST_R("rsbs	r",0,VAL1,", #0")
+	TEST_R("rsbs	r",7,VAL2,", #0")
+	DATA_PROCESSING16("cmp",0xf00f00ff)
+	DATA_PROCESSING16("cmn",0xf00f00ff)
+	DATA_PROCESSING16("orrs",0xf00f00ff)
+	DATA_PROCESSING16("muls",VAL2)
+	DATA_PROCESSING16("bics",0xf00f00ff)
+	DATA_PROCESSING16("mvns",VAL2)
+
+	TEST_GROUP("Special data instructions and branch and exchange")
+
+	TEST_RR(  "add	r",0, VAL1,", r",7,VAL2,"")
+	TEST_RR(  "add	r",3, VAL2,", r",8,VAL3,"")
+	TEST_RR(  "add	r",8, VAL3,", r",0,VAL1,"")
+	TEST_R(   "add	sp"        ", r",8,-8,  "")
+	TEST_R(   "add	r",14,VAL1,", pc")
+	TEST_BF_R("add	pc"        ", r",0,2f-1f-8,"")
+	TEST_UNSUPPORTED(".short 0x44ff	@ add pc, pc")
+
+	TEST_RR(  "cmp	r",3,VAL1,", r",8,VAL2,"")
+	TEST_RR(  "cmp	r",8,VAL2,", r",0,VAL1,"")
+	TEST_R(   "cmp	sp"       ", r",8,-8,  "")
+
+	TEST_R(   "mov	r0, r",7,VAL2,"")
+	TEST_R(   "mov	r3, r",8,VAL3,"")
+	TEST_R(   "mov	r8, r",0,VAL1,"")
+	TEST_P(   "mov	sp, r",8,-8,  "")
+	TEST(     "mov	lr, pc")
+	TEST_BF_R("mov	pc, r",0,2f,  "")
+
+	TEST_BF_R("bx	r",0, 2f+1,"")
+	TEST_BF_R("bx	r",14,2f+1,"")
+	TESTCASE_START("bx	pc")
+		TEST_ARG_REG(14, 99f+1)
+		TEST_ARG_END("")
+		"	nop			\n\t" /* To align the bx pc*/
+		"50:	nop			\n\t"
+		"1:	bx	pc		\n\t"
+		"	bx	lr		\n\t"
+		".arm				\n\t"
+		"	adr	lr, 2f+1	\n\t"
+		"	bx	lr		\n\t"
+		".thumb				\n\t"
+		"2:	nop			\n\t"
+	TESTCASE_END
+
+	TEST_BF_R("blx	r",0, 2f+1,"")
+	TEST_BB_R("blx	r",14,2f+1,"")
+	TEST_UNSUPPORTED(".short 0x47f8	@ blx pc")
+
+	TEST_GROUP("Load from Literal Pool")
+
+	TEST_X( "ldr	r0, 3f",
+		".align					\n\t"
+		"3:	.word	"__stringify(VAL1))
+	TEST_X( "ldr	r7, 3f",
+		".space 128				\n\t"
+		".align					\n\t"
+		"3:	.word	"__stringify(VAL2))
+
+	TEST_GROUP("16-bit Thumb Load/store instructions")
+
+	TEST_RPR("str	r",0, VAL1,", [r",1, 24,", r",2,  48,"]")
+	TEST_RPR("str	r",7, VAL2,", [r",6, 24,", r",5,  48,"]")
+	TEST_RPR("strh	r",0, VAL1,", [r",1, 24,", r",2,  48,"]")
+	TEST_RPR("strh	r",7, VAL2,", [r",6, 24,", r",5,  48,"]")
+	TEST_RPR("strb	r",0, VAL1,", [r",1, 24,", r",2,  48,"]")
+	TEST_RPR("strb	r",7, VAL2,", [r",6, 24,", r",5,  48,"]")
+	TEST_PR( "ldrsb	r0, [r",1, 24,", r",2,  48,"]")
+	TEST_PR( "ldrsb	r7, [r",6, 24,", r",5,  50,"]")
+	TEST_PR( "ldr	r0, [r",1, 24,", r",2,  48,"]")
+	TEST_PR( "ldr	r7, [r",6, 24,", r",5,  48,"]")
+	TEST_PR( "ldrh	r0, [r",1, 24,", r",2,  48,"]")
+	TEST_PR( "ldrh	r7, [r",6, 24,", r",5,  50,"]")
+	TEST_PR( "ldrb	r0, [r",1, 24,", r",2,  48,"]")
+	TEST_PR( "ldrb	r7, [r",6, 24,", r",5,  50,"]")
+	TEST_PR( "ldrsh	r0, [r",1, 24,", r",2,  48,"]")
+	TEST_PR( "ldrsh	r7, [r",6, 24,", r",5,  50,"]")
+
+	TEST_RP("str	r",0, VAL1,", [r",1, 24,", #120]")
+	TEST_RP("str	r",7, VAL2,", [r",6, 24,", #120]")
+	TEST_P( "ldr	r0, [r",1, 24,", #120]")
+	TEST_P( "ldr	r7, [r",6, 24,", #120]")
+	TEST_RP("strb	r",0, VAL1,", [r",1, 24,", #30]")
+	TEST_RP("strb	r",7, VAL2,", [r",6, 24,", #30]")
+	TEST_P( "ldrb	r0, [r",1, 24,", #30]")
+	TEST_P( "ldrb	r7, [r",6, 24,", #30]")
+	TEST_RP("strh	r",0, VAL1,", [r",1, 24,", #60]")
+	TEST_RP("strh	r",7, VAL2,", [r",6, 24,", #60]")
+	TEST_P( "ldrh	r0, [r",1, 24,", #60]")
+	TEST_P( "ldrh	r7, [r",6, 24,", #60]")
+
+	TEST_R( "str	r",0, VAL1,", [sp, #0]")
+	TEST_R( "str	r",7, VAL2,", [sp, #160]")
+	TEST(   "ldr	r0, [sp, #0]")
+	TEST(   "ldr	r7, [sp, #160]")
+
+	TEST_RP("str	r",0, VAL1,", [r",0, 24,"]")
+	TEST_P( "ldr	r0, [r",0, 24,"]")
+
+	TEST_GROUP("Generate PC-/SP-relative address")
+
+	TEST("add	r0, pc, #4")
+	TEST("add	r7, pc, #1020")
+	TEST("add	r0, sp, #4")
+	TEST("add	r7, sp, #1020")
+
+	TEST_GROUP("Miscellaneous 16-bit instructions")
+
+	TEST_UNSUPPORTED( "cpsie	i")
+	TEST_UNSUPPORTED( "cpsid	i")
+	TEST_UNSUPPORTED( "setend	le")
+	TEST_UNSUPPORTED( "setend	be")
+
+	TEST("add	sp, #"__stringify(TEST_MEMORY_SIZE)) /* Assumes TEST_MEMORY_SIZE < 0x400 */
+	TEST("sub	sp, #0x7f*4")
+
+DONT_TEST_IN_ITBLOCK(
+	TEST_BF_R(  "cbnz	r",0,0, ", 2f")
+	TEST_BF_R(  "cbz	r",2,-1,", 2f")
+	TEST_BF_RX( "cbnz	r",4,1, ", 2f", SPACE_0x20)
+	TEST_BF_RX( "cbz	r",7,0, ", 2f", SPACE_0x40)
+)
+	TEST_R("sxth	r0, r",7, HH1,"")
+	TEST_R("sxth	r7, r",0, HH2,"")
+	TEST_R("sxtb	r0, r",7, HH1,"")
+	TEST_R("sxtb	r7, r",0, HH2,"")
+	TEST_R("uxth	r0, r",7, HH1,"")
+	TEST_R("uxth	r7, r",0, HH2,"")
+	TEST_R("uxtb	r0, r",7, HH1,"")
+	TEST_R("uxtb	r7, r",0, HH2,"")
+	TEST_R("rev	r0, r",7, VAL1,"")
+	TEST_R("rev	r7, r",0, VAL2,"")
+	TEST_R("rev16	r0, r",7, VAL1,"")
+	TEST_R("rev16	r7, r",0, VAL2,"")
+	TEST_UNSUPPORTED(".short 0xba80")
+	TEST_UNSUPPORTED(".short 0xbabf")
+	TEST_R("revsh	r0, r",7, VAL1,"")
+	TEST_R("revsh	r7, r",0, VAL2,"")
+
+#define TEST_POPPC(code, offset)	\
+	TESTCASE_START(code)		\
+	TEST_ARG_PTR(13, offset)	\
+	TEST_ARG_END("")		\
+	TEST_BRANCH_F(code)		\
+	TESTCASE_END
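+
+/*
+ * TEST_POPPC() points sp (r13) "offset" bytes into the framework's test
+ * memory before the pop; the offsets below are chosen so that
+ * offset + 4 * (number of registers popped) is always 16*4, e.g. 15*4
+ * for "pop {pc}" and 7*4 for "pop {r0-r7,pc}", leaving sp at the same
+ * place after each pop.
+ */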
+
+	TEST("push	{r0}")
+	TEST("push	{r7}")
+	TEST("push	{r14}")
+	TEST("push	{r0-r7,r14}")
+	TEST("push	{r0,r2,r4,r6,r14}")
+	TEST("push	{r1,r3,r5,r7}")
+	TEST("pop	{r0}")
+	TEST("pop	{r7}")
+	TEST("pop	{r0,r2,r4,r6}")
+	TEST_POPPC("pop	{pc}",15*4)
+	TEST_POPPC("pop	{r0-r7,pc}",7*4)
+	TEST_POPPC("pop	{r1,r3,r5,r7,pc}",11*4)
+	TEST_THUMB_TO_ARM_INTERWORK_P("pop	{pc}	@ ",13,15*4,"")
+	TEST_THUMB_TO_ARM_INTERWORK_P("pop	{r0-r7,pc}	@ ",13,7*4,"")
+
+	TEST_UNSUPPORTED("bkpt.n	0")
+	TEST_UNSUPPORTED("bkpt.n	255")
+
+	TEST_SUPPORTED("yield")
+	TEST("sev")
+	TEST("nop")
+	TEST("wfi")
+	TEST_SUPPORTED("wfe")
+	TEST_UNSUPPORTED(".short 0xbf50") /* Unassigned hints */
+	TEST_UNSUPPORTED(".short 0xbff0") /* Unassigned hints */
+
+#define TEST_IT(code, code2)			\
+	TESTCASE_START(code)			\
+	TEST_ARG_END("")			\
+	"50:	nop			\n\t"	\
+	"1:	"code"			\n\t"	\
+	"	"code2"			\n\t"	\
+	"2:	nop			\n\t"	\
+	TESTCASE_END
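+
+/*
+ * TEST_IT() places the "it" instruction under test at label 1: with its
+ * dependent conditional instructions supplied in code2 immediately after
+ * it; these cases are themselves wrapped in DONT_TEST_IN_ITBLOCK()
+ * below, since an IT instruction cannot appear inside an IT block.
+ */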
+
+DONT_TEST_IN_ITBLOCK(
+	TEST_IT("it	eq","moveq r0,#0")
+	TEST_IT("it	vc","movvc r0,#0")
+	TEST_IT("it	le","movle r0,#0")
+	TEST_IT("ite	eq","moveq r0,#0\n\t  movne r1,#1")
+	TEST_IT("itet	vc","movvc r0,#0\n\t  movvs r1,#1\n\t  movvc r2,#2")
+	TEST_IT("itete	le","movle r0,#0\n\t  movgt r1,#1\n\t  movle r2,#2\n\t  movgt r3,#3")
+	TEST_IT("itttt	le","movle r0,#0\n\t  movle r1,#1\n\t  movle r2,#2\n\t  movle r3,#3")
+	TEST_IT("iteee	le","movle r0,#0\n\t  movgt r1,#1\n\t  movgt r2,#2\n\t  movgt r3,#3")
+)
+
+	TEST_GROUP("Load and store multiple")
+
+	TEST_P("ldmia	r",4, 16*4,"!, {r0,r7}")
+	TEST_P("ldmia	r",7, 16*4,"!, {r0-r6}")
+	TEST_P("stmia	r",4, 16*4,"!, {r0,r7}")
+	TEST_P("stmia	r",0, 16*4,"!, {r0-r7}")
+
+	TEST_GROUP("Conditional branch and Supervisor Call instructions")
+
+CONDITION_INSTRUCTIONS(8,
+	TEST_BF("beq	2f")
+	TEST_BB("bne	2b")
+	TEST_BF("bgt	2f")
+	TEST_BB("blt	2b")
+)
+	TEST_UNSUPPORTED(".short 0xde00")
+	TEST_UNSUPPORTED(".short 0xdeff")
+	TEST_UNSUPPORTED("svc	#0x00")
+	TEST_UNSUPPORTED("svc	#0xff")
+
+	TEST_GROUP("Unconditional branch")
+
+	TEST_BF(  "b	2f")
+	TEST_BB(  "b	2b")
+	TEST_BF_X("b	2f", SPACE_0x400)
+	TEST_BB_X("b	2b", SPACE_0x400)
+
+	TEST_GROUP("Testing instructions in IT blocks")
+
+	TEST_ITBLOCK("subs.n r0, r0")
+
+	verbose("\n");
+}
+
+
+void kprobe_thumb32_test_cases(void)
+{
+	kprobe_test_flags = 0;
+
+	TEST_GROUP("Load/store multiple")
+
+	TEST_UNSUPPORTED("rfedb	sp")
+	TEST_UNSUPPORTED("rfeia	sp")
+	TEST_UNSUPPORTED("rfedb	sp!")
+	TEST_UNSUPPORTED("rfeia	sp!")
+
+	TEST_P(   "stmia	r",0, 16*4,", {r0,r8}")
+	TEST_P(   "stmia	r",4, 16*4,", {r0-r12,r14}")
+	TEST_P(   "stmia	r",7, 16*4,"!, {r8-r12,r14}")
+	TEST_P(   "stmia	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+
+	TEST_P(   "ldmia	r",0, 16*4,", {r0,r8}")
+	TEST_P(   "ldmia	r",4, 0,   ", {r0-r12,r14}")
+	TEST_BF_P("ldmia	r",5, 8*4, "!, {r6-r12,r15}")
+	TEST_P(   "ldmia	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_BF_P("ldmia	r",14,14*4,"!, {r4,pc}")
+
+	TEST_P(   "stmdb	r",0, 16*4,", {r0,r8}")
+	TEST_P(   "stmdb	r",4, 16*4,", {r0-r12,r14}")
+	TEST_P(   "stmdb	r",5, 16*4,"!, {r8-r12,r14}")
+	TEST_P(   "stmdb	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+
+	TEST_P(   "ldmdb	r",0, 16*4,", {r0,r8}")
+	TEST_P(   "ldmdb	r",4, 16*4,", {r0-r12,r14}")
+	TEST_BF_P("ldmdb	r",5, 16*4,"!, {r6-r12,r15}")
+	TEST_P(   "ldmdb	r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+	TEST_BF_P("ldmdb	r",14,16*4,"!, {r4,pc}")
+
+	TEST_P(   "stmdb	r",13,16*4,"!, {r3-r12,lr}")
+	TEST_P(	  "stmdb	r",13,16*4,"!, {r3-r12}")
+	TEST_P(   "stmdb	r",2, 16*4,", {r3-r12,lr}")
+	TEST_P(   "stmdb	r",13,16*4,"!, {r2-r12,lr}")
+	TEST_P(   "stmdb	r",0, 16*4,", {r0-r12}")
+	TEST_P(   "stmdb	r",0, 16*4,", {r0-r12,lr}")
+
+	TEST_BF_P("ldmia	r",13,5*4, "!, {r3-r12,pc}")
+	TEST_P(	  "ldmia	r",13,5*4, "!, {r3-r12}")
+	TEST_BF_P("ldmia	r",2, 5*4, "!, {r3-r12,pc}")
+	TEST_BF_P("ldmia	r",13,4*4, "!, {r2-r12,pc}")
+	TEST_P(   "ldmia	r",0, 16*4,", {r0-r12}")
+	TEST_P(   "ldmia	r",0, 16*4,", {r0-r12,lr}")
+
+	TEST_THUMB_TO_ARM_INTERWORK_P("ldmia	r",0,14*4,", {r12,pc}")
+	TEST_THUMB_TO_ARM_INTERWORK_P("ldmia	r",13,2*4,", {r0-r12,pc}")
+
+	TEST_UNSUPPORTED(".short 0xe88f,0x0101	@ stmia	pc, {r0,r8}")
+	TEST_UNSUPPORTED(".short 0xe92f,0x5f00	@ stmdb	pc!, {r8-r12,r14}")
+	TEST_UNSUPPORTED(".short 0xe8bd,0xc000	@ ldmia	r13!, {r14,pc}")
+	TEST_UNSUPPORTED(".short 0xe93e,0xc000	@ ldmdb	r14!, {r14,pc}")
+	TEST_UNSUPPORTED(".short 0xe8a7,0x3f00	@ stmia	r7!, {r8-r12,sp}")
+	TEST_UNSUPPORTED(".short 0xe8a7,0x9f00	@ stmia	r7!, {r8-r12,pc}")
+	TEST_UNSUPPORTED(".short 0xe93e,0x2010	@ ldmdb	r14!, {r4,sp}")
+
+	TEST_GROUP("Load/store double or exclusive, table branch")
+
+	TEST_P(  "ldrd	r0, r1, [r",1, 24,", #-16]")
+	TEST(    "ldrd	r12, r14, [sp, #16]")
+	TEST_P(  "ldrd	r1, r0, [r",7, 24,", #-16]!")
+	TEST(    "ldrd	r14, r12, [sp, #16]!")
+	TEST_P(  "ldrd	r1, r0, [r",7, 24,"], #16")
+	TEST(    "ldrd	r7, r8, [sp], #-16")
+
+	TEST_X( "ldrd	r12, r14, 3f",
+		".align 3				\n\t"
+		"3:	.word	"__stringify(VAL1)"	\n\t"
+		"	.word	"__stringify(VAL2))
+
+	TEST_UNSUPPORTED(".short 0xe9ff,0xec04	@ ldrd	r14, r12, [pc, #16]!")
+	TEST_UNSUPPORTED(".short 0xe8ff,0xec04	@ ldrd	r14, r12, [pc], #16")
+	TEST_UNSUPPORTED(".short 0xe9d4,0xd800	@ ldrd	sp, r8, [r4]")
+	TEST_UNSUPPORTED(".short 0xe9d4,0xf800	@ ldrd	pc, r8, [r4]")
+	TEST_UNSUPPORTED(".short 0xe9d4,0x7d00	@ ldrd	r7, sp, [r4]")
+	TEST_UNSUPPORTED(".short 0xe9d4,0x7f00	@ ldrd	r7, pc, [r4]")
+
+	TEST_RRP("strd	r",0, VAL1,", r",1, VAL2,", [r",1, 24,", #-16]")
+	TEST_RR( "strd	r",12,VAL2,", r",14,VAL1,", [sp, #16]")
+	TEST_RRP("strd	r",1, VAL1,", r",0, VAL2,", [r",7, 24,", #-16]!")
+	TEST_RR( "strd	r",14,VAL2,", r",12,VAL1,", [sp, #16]!")
+	TEST_RRP("strd	r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16")
+	TEST_RR( "strd	r",7, VAL2,", r",8, VAL1,", [sp], #-16")
+	TEST_UNSUPPORTED(".short 0xe9ef,0xec04	@ strd	r14, r12, [pc, #16]!")
+	TEST_UNSUPPORTED(".short 0xe8ef,0xec04	@ strd	r14, r12, [pc], #16")
+
+	TEST_RX("tbb	[pc, r",0, (9f-(1f+4)),"]",
+		"9:			\n\t"
+		".byte	(2f-1b-4)>>1	\n\t"
+		".byte	(3f-1b-4)>>1	\n\t"
+		"3:	mvn	r0, r0	\n\t"
+		"2:	nop		\n\t")
+
+	TEST_RX("tbb	[pc, r",4, (9f-(1f+4)+1),"]",
+		"9:			\n\t"
+		".byte	(2f-1b-4)>>1	\n\t"
+		".byte	(3f-1b-4)>>1	\n\t"
+		"3:	mvn	r0, r0	\n\t"
+		"2:	nop		\n\t")
+
+	TEST_RRX("tbb	[r",1,9f,", r",2,0,"]",
+		"9:			\n\t"
+		".byte	(2f-1b-4)>>1	\n\t"
+		".byte	(3f-1b-4)>>1	\n\t"
+		"3:	mvn	r0, r0	\n\t"
+		"2:	nop		\n\t")
+
+	TEST_RX("tbh	[pc, r",7, (9f-(1f+4))>>1,"]",
+		"9:			\n\t"
+		".short	(2f-1b-4)>>1	\n\t"
+		".short	(3f-1b-4)>>1	\n\t"
+		"3:	mvn	r0, r0	\n\t"
+		"2:	nop		\n\t")
+
+	TEST_RX("tbh	[pc, r",12, ((9f-(1f+4))>>1)+1,"]",
+		"9:			\n\t"
+		".short	(2f-1b-4)>>1	\n\t"
+		".short	(3f-1b-4)>>1	\n\t"
+		"3:	mvn	r0, r0	\n\t"
+		"2:	nop		\n\t")
+
+	TEST_RRX("tbh	[r",1,9f, ", r",14,1,"]",
+		"9:			\n\t"
+		".short	(2f-1b-4)>>1	\n\t"
+		".short	(3f-1b-4)>>1	\n\t"
+		"3:	mvn	r0, r0	\n\t"
+		"2:	nop		\n\t")
+
+	TEST_UNSUPPORTED(".short 0xe8d1,0xf01f	@ tbh [r1, pc]")
+	TEST_UNSUPPORTED(".short 0xe8d1,0xf01d	@ tbh [r1, sp]")
+	TEST_UNSUPPORTED(".short 0xe8dd,0xf012	@ tbh [sp, r2]")
+
+	TEST_UNSUPPORTED("strexb	r0, r1, [r2]")
+	TEST_UNSUPPORTED("strexh	r0, r1, [r2]")
+	TEST_UNSUPPORTED("strexd	r0, r1, [r2]")
+	TEST_UNSUPPORTED("ldrexb	r0, [r1]")
+	TEST_UNSUPPORTED("ldrexh	r0, [r1]")
+	TEST_UNSUPPORTED("ldrexd	r0, [r1]")
+
+	TEST_GROUP("Data-processing (shifted register) and (modified immediate)")
+
+#define _DATA_PROCESSING32_DNM(op,s,val)					\
+	TEST_RR(op s".w	r0,  r",1, VAL1,", r",2, val, "")			\
+	TEST_RR(op s"	r1,  r",1, VAL1,", r",2, val, ", lsl #3")		\
+	TEST_RR(op s"	r2,  r",3, VAL1,", r",2, val, ", lsr #4")		\
+	TEST_RR(op s"	r3,  r",3, VAL1,", r",2, val, ", asr #5")		\
+	TEST_RR(op s"	r4,  r",5, VAL1,", r",2, N(val),", asr #6")		\
+	TEST_RR(op s"	r5,  r",5, VAL1,", r",2, val, ", ror #7")		\
+	TEST_RR(op s"	r8,  r",9, VAL1,", r",10,val, ", rrx")			\
+	TEST_R( op s"	r0,  r",11,VAL1,", #0x00010001")			\
+	TEST_R( op s"	r11, r",0, VAL1,", #0xf5000000")			\
+	TEST_R( op s"	r7,  r",8, VAL2,", #0x000af000")
+
+#define DATA_PROCESSING32_DNM(op,val)		\
+	_DATA_PROCESSING32_DNM(op,"",val)	\
+	_DATA_PROCESSING32_DNM(op,"s",val)
+
+#define DATA_PROCESSING32_NM(op,val)					\
+	TEST_RR(op".w	r",1, VAL1,", r",2, val, "")			\
+	TEST_RR(op"	r",1, VAL1,", r",2, val, ", lsl #3")		\
+	TEST_RR(op"	r",3, VAL1,", r",2, val, ", lsr #4")		\
+	TEST_RR(op"	r",3, VAL1,", r",2, val, ", asr #5")		\
+	TEST_RR(op"	r",5, VAL1,", r",2, N(val),", asr #6")		\
+	TEST_RR(op"	r",5, VAL1,", r",2, val, ", ror #7")		\
+	TEST_RR(op"	r",9, VAL1,", r",10,val, ", rrx")		\
+	TEST_R( op"	r",11,VAL1,", #0x00010001")			\
+	TEST_R( op"	r",0, VAL1,", #0xf5000000")			\
+	TEST_R( op"	r",8, VAL2,", #0x000af000")
+
+#define _DATA_PROCESSING32_DM(op,s,val)				\
+	TEST_R( op s".w	r0,  r",14, val, "")			\
+	TEST_R( op s"	r1,  r",12, val, ", lsl #3")		\
+	TEST_R( op s"	r2,  r",11, val, ", lsr #4")		\
+	TEST_R( op s"	r3,  r",10, val, ", asr #5")		\
+	TEST_R( op s"	r4,  r",9, N(val),", asr #6")		\
+	TEST_R( op s"	r5,  r",8, val, ", ror #7")		\
+	TEST_R( op s"	r8,  r",7,val, ", rrx")			\
+	TEST(   op s"	r0,  #0x00010001")			\
+	TEST(   op s"	r11, #0xf5000000")			\
+	TEST(   op s"	r7,  #0x000af000")			\
+	TEST(   op s"	r4,  #0x00005a00")
+
+#define DATA_PROCESSING32_DM(op,val)		\
+	_DATA_PROCESSING32_DM(op,"",val)	\
+	_DATA_PROCESSING32_DM(op,"s",val)
+
+	DATA_PROCESSING32_DNM("and",0xf00f00ff)
+	DATA_PROCESSING32_NM("tst",0xf00f00ff)
+	DATA_PROCESSING32_DNM("bic",0xf00f00ff)
+	DATA_PROCESSING32_DNM("orr",0xf00f00ff)
+	DATA_PROCESSING32_DM("mov",VAL2)
+	DATA_PROCESSING32_DNM("orn",0xf00f00ff)
+	DATA_PROCESSING32_DM("mvn",VAL2)
+	DATA_PROCESSING32_DNM("eor",0xf00f00ff)
+	DATA_PROCESSING32_NM("teq",0xf00f00ff)
+	DATA_PROCESSING32_DNM("add",VAL2)
+	DATA_PROCESSING32_NM("cmn",VAL2)
+	DATA_PROCESSING32_DNM("adc",VAL2)
+	DATA_PROCESSING32_DNM("sbc",VAL2)
+	DATA_PROCESSING32_DNM("sub",VAL2)
+	DATA_PROCESSING32_NM("cmp",VAL2)
+	DATA_PROCESSING32_DNM("rsb",VAL2)
+
+	TEST_RR("pkhbt	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR("pkhbt	r14,r",12, HH1,", r",10,HH2,", lsl #2")
+	TEST_RR("pkhtb	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR("pkhtb	r14,r",12, HH1,", r",10,HH2,", asr #2")
+
+	TEST_UNSUPPORTED(".short 0xea17,0x0f0d	@ tst.w r7, sp")
+	TEST_UNSUPPORTED(".short 0xea17,0x0f0f	@ tst.w r7, pc")
+	TEST_UNSUPPORTED(".short 0xea1d,0x0f07	@ tst.w sp, r7")
+	TEST_UNSUPPORTED(".short 0xea1f,0x0f07	@ tst.w pc, r7")
+	TEST_UNSUPPORTED(".short 0xf01d,0x1f08	@ tst sp, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf01f,0x1f08	@ tst pc, #0x00080008")
+
+	TEST_UNSUPPORTED(".short 0xea97,0x0f0d	@ teq.w r7, sp")
+	TEST_UNSUPPORTED(".short 0xea97,0x0f0f	@ teq.w r7, pc")
+	TEST_UNSUPPORTED(".short 0xea9d,0x0f07	@ teq.w sp, r7")
+	TEST_UNSUPPORTED(".short 0xea9f,0x0f07	@ teq.w pc, r7")
+	TEST_UNSUPPORTED(".short 0xf09d,0x1f08	@ tst sp, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf09f,0x1f08	@ tst pc, #0x00080008")
+
+	TEST_UNSUPPORTED(".short 0xeb17,0x0f0d	@ cmn.w r7, sp")
+	TEST_UNSUPPORTED(".short 0xeb17,0x0f0f	@ cmn.w r7, pc")
+	TEST_P("cmn.w	sp, r",7,0,"")
+	TEST_UNSUPPORTED(".short 0xeb1f,0x0f07	@ cmn.w pc, r7")
+	TEST(  "cmn	sp, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf11f,0x1f08	@ cmn pc, #0x00080008")
+
+	TEST_UNSUPPORTED(".short 0xebb7,0x0f0d	@ cmp.w r7, sp")
+	TEST_UNSUPPORTED(".short 0xebb7,0x0f0f	@ cmp.w r7, pc")
+	TEST_P("cmp.w	sp, r",7,0,"")
+	TEST_UNSUPPORTED(".short 0xebbf,0x0f07	@ cmp.w pc, r7")
+	TEST(  "cmp	sp, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf1bf,0x1f08	@ cmp pc, #0x00080008")
+
+	TEST_UNSUPPORTED(".short 0xea5f,0x070d	@ movs.w r7, sp")
+	TEST_UNSUPPORTED(".short 0xea5f,0x070f	@ movs.w r7, pc")
+	TEST_UNSUPPORTED(".short 0xea5f,0x0d07	@ movs.w sp, r7")
+	TEST_UNSUPPORTED(".short 0xea4f,0x0f07	@ mov.w  pc, r7")
+	TEST_UNSUPPORTED(".short 0xf04f,0x1d08	@ mov sp, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf04f,0x1f08	@ mov pc, #0x00080008")
+
+	TEST_R("add.w	r0, sp, r",1, 4,"")
+	TEST_R("adds	r0, sp, r",1, 4,", asl #3")
+	TEST_R("add	r0, sp, r",1, 4,", asl #4")
+	TEST_R("add	r0, sp, r",1, 16,", ror #1")
+	TEST_R("add.w	sp, sp, r",1, 4,"")
+	TEST_R("add	sp, sp, r",1, 4,", asl #3")
+	TEST_UNSUPPORTED(".short 0xeb0d,0x1d01	@ add sp, sp, r1, asl #4")
+	TEST_UNSUPPORTED(".short 0xeb0d,0x0d71	@ add sp, sp, r1, ror #1")
+	TEST(  "add.w	r0, sp, #24")
+	TEST(  "add.w	sp, sp, #24")
+	TEST_UNSUPPORTED(".short 0xeb0d,0x0f01	@ add pc, sp, r1")
+	TEST_UNSUPPORTED(".short 0xeb0d,0x000f	@ add r0, sp, pc")
+	TEST_UNSUPPORTED(".short 0xeb0d,0x000d	@ add r0, sp, sp")
+	TEST_UNSUPPORTED(".short 0xeb0d,0x0d0f	@ add sp, sp, pc")
+	TEST_UNSUPPORTED(".short 0xeb0d,0x0d0d	@ add sp, sp, sp")
+
+	TEST_R("sub.w	r0, sp, r",1, 4,"")
+	TEST_R("subs	r0, sp, r",1, 4,", asl #3")
+	TEST_R("sub	r0, sp, r",1, 4,", asl #4")
+	TEST_R("sub	r0, sp, r",1, 16,", ror #1")
+	TEST_R("sub.w	sp, sp, r",1, 4,"")
+	TEST_R("sub	sp, sp, r",1, 4,", asl #3")
+	TEST_UNSUPPORTED(".short 0xebad,0x1d01	@ sub sp, sp, r1, asl #4")
+	TEST_UNSUPPORTED(".short 0xebad,0x0d71	@ sub sp, sp, r1, ror #1")
+	TEST_UNSUPPORTED(".short 0xebad,0x0f01	@ sub pc, sp, r1")
+	TEST(  "sub.w	r0, sp, #24")
+	TEST(  "sub.w	sp, sp, #24")
+
+	TEST_UNSUPPORTED(".short 0xea02,0x010f	@ and r1, r2, pc")
+	TEST_UNSUPPORTED(".short 0xea0f,0x0103	@ and r1, pc, r3")
+	TEST_UNSUPPORTED(".short 0xea02,0x0f03	@ and pc, r2, r3")
+	TEST_UNSUPPORTED(".short 0xea02,0x010d	@ and r1, r2, sp")
+	TEST_UNSUPPORTED(".short 0xea0d,0x0103	@ and r1, sp, r3")
+	TEST_UNSUPPORTED(".short 0xea02,0x0d03	@ and sp, r2, r3")
+	TEST_UNSUPPORTED(".short 0xf00d,0x1108	@ and r1, sp, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf00f,0x1108	@ and r1, pc, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf002,0x1d08	@ and sp, r8, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf002,0x1f08	@ and pc, r8, #0x00080008")
+
+	TEST_UNSUPPORTED(".short 0xeb02,0x010f	@ add r1, r2, pc")
+	TEST_UNSUPPORTED(".short 0xeb0f,0x0103	@ add r1, pc, r3")
+	TEST_UNSUPPORTED(".short 0xeb02,0x0f03	@ add pc, r2, r3")
+	TEST_UNSUPPORTED(".short 0xeb02,0x010d	@ add r1, r2, sp")
+	TEST_SUPPORTED(  ".short 0xeb0d,0x0103	@ add r1, sp, r3")
+	TEST_UNSUPPORTED(".short 0xeb02,0x0d03	@ add sp, r2, r3")
+	TEST_SUPPORTED(  ".short 0xf10d,0x1108	@ add r1, sp, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf10d,0x1f08	@ add pc, sp, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf10f,0x1108	@ add r1, pc, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf102,0x1d08	@ add sp, r8, #0x00080008")
+	TEST_UNSUPPORTED(".short 0xf102,0x1f08	@ add pc, r8, #0x00080008")
+
+	TEST_UNSUPPORTED(".short 0xeaa0,0x0000")
+	TEST_UNSUPPORTED(".short 0xeaf0,0x0000")
+	TEST_UNSUPPORTED(".short 0xeb20,0x0000")
+	TEST_UNSUPPORTED(".short 0xeb80,0x0000")
+	TEST_UNSUPPORTED(".short 0xebe0,0x0000")
+
+	TEST_UNSUPPORTED(".short 0xf0a0,0x0000")
+	TEST_UNSUPPORTED(".short 0xf0c0,0x0000")
+	TEST_UNSUPPORTED(".short 0xf0f0,0x0000")
+	TEST_UNSUPPORTED(".short 0xf120,0x0000")
+	TEST_UNSUPPORTED(".short 0xf180,0x0000")
+	TEST_UNSUPPORTED(".short 0xf1e0,0x0000")
+
+	TEST_GROUP("Coprocessor instructions")
+
+	TEST_UNSUPPORTED(".short 0xec00,0x0000")
+	TEST_UNSUPPORTED(".short 0xeff0,0x0000")
+	TEST_UNSUPPORTED(".short 0xfc00,0x0000")
+	TEST_UNSUPPORTED(".short 0xfff0,0x0000")
+
+	TEST_GROUP("Data-processing (plain binary immediate)")
+
+	TEST_R("addw	r0,  r",1, VAL1,", #0x123")
+	TEST(  "addw	r14, sp, #0xf5a")
+	TEST(  "addw	sp, sp, #0x20")
+	TEST(  "addw	r7,  pc, #0x888")
+	TEST_UNSUPPORTED(".short 0xf20f,0x1f20	@ addw pc, pc, #0x120")
+	TEST_UNSUPPORTED(".short 0xf20d,0x1f20	@ addw pc, sp, #0x120")
+	TEST_UNSUPPORTED(".short 0xf20f,0x1d20	@ addw sp, pc, #0x120")
+	TEST_UNSUPPORTED(".short 0xf200,0x1d20	@ addw sp, r0, #0x120")
+
+	TEST_R("subw	r0,  r",1, VAL1,", #0x123")
+	TEST(  "subw	r14, sp, #0xf5a")
+	TEST(  "subw	sp, sp, #0x20")
+	TEST(  "subw	r7,  pc, #0x888")
+	TEST_UNSUPPORTED(".short 0xf2af,0x1f20	@ subw pc, pc, #0x120")
+	TEST_UNSUPPORTED(".short 0xf2ad,0x1f20	@ subw pc, sp, #0x120")
+	TEST_UNSUPPORTED(".short 0xf2af,0x1d20	@ subw sp, pc, #0x120")
+	TEST_UNSUPPORTED(".short 0xf2a0,0x1d20	@ subw sp, r0, #0x120")
+
+	TEST("movw	r0, #0")
+	TEST("movw	r0, #0xffff")
+	TEST("movw	lr, #0xffff")
+	TEST_UNSUPPORTED(".short 0xf240,0x0d00	@ movw sp, #0")
+	TEST_UNSUPPORTED(".short 0xf240,0x0f00	@ movw pc, #0")
+
+	TEST_R("movt	r",0, VAL1,", #0")
+	TEST_R("movt	r",0, VAL2,", #0xffff")
+	TEST_R("movt	r",14,VAL1,", #0xffff")
+	TEST_UNSUPPORTED(".short 0xf2c0,0x0d00	@ movt sp, #0")
+	TEST_UNSUPPORTED(".short 0xf2c0,0x0f00	@ movt pc, #0")
+
+	TEST_R(     "ssat	r0, #24, r",0,   VAL1,"")
+	TEST_R(     "ssat	r14, #24, r",12, VAL2,"")
+	TEST_R(     "ssat	r0, #24, r",0,   VAL1,", lsl #8")
+	TEST_R(     "ssat	r14, #24, r",12, VAL2,", asr #8")
+	TEST_UNSUPPORTED(".short 0xf30c,0x0d17	@ ssat	sp, #24, r12")
+	TEST_UNSUPPORTED(".short 0xf30c,0x0f17	@ ssat	pc, #24, r12")
+	TEST_UNSUPPORTED(".short 0xf30d,0x0c17	@ ssat	r12, #24, sp")
+	TEST_UNSUPPORTED(".short 0xf30f,0x0c17	@ ssat	r12, #24, pc")
+
+	TEST_R(     "usat	r0, #24, r",0,   VAL1,"")
+	TEST_R(     "usat	r14, #24, r",12, VAL2,"")
+	TEST_R(     "usat	r0, #24, r",0,   VAL1,", lsl #8")
+	TEST_R(     "usat	r14, #24, r",12, VAL2,", asr #8")
+	TEST_UNSUPPORTED(".short 0xf38c,0x0d17	@ usat	sp, #24, r12")
+	TEST_UNSUPPORTED(".short 0xf38c,0x0f17	@ usat	pc, #24, r12")
+	TEST_UNSUPPORTED(".short 0xf38d,0x0c17	@ usat	r12, #24, sp")
+	TEST_UNSUPPORTED(".short 0xf38f,0x0c17	@ usat	r12, #24, pc")
+
+	TEST_R(     "ssat16	r0, #12, r",0,   HH1,"")
+	TEST_R(     "ssat16	r14, #12, r",12, HH2,"")
+	TEST_UNSUPPORTED(".short 0xf32c,0x0d0b	@ ssat16	sp, #12, r12")
+	TEST_UNSUPPORTED(".short 0xf32c,0x0f0b	@ ssat16	pc, #12, r12")
+	TEST_UNSUPPORTED(".short 0xf32d,0x0c0b	@ ssat16	r12, #12, sp")
+	TEST_UNSUPPORTED(".short 0xf32f,0x0c0b	@ ssat16	r12, #12, pc")
+
+	TEST_R(     "usat16	r0, #12, r",0,   HH1,"")
+	TEST_R(     "usat16	r14, #12, r",12, HH2,"")
+	TEST_UNSUPPORTED(".short 0xf3ac,0x0d0b	@ usat16	sp, #12, r12")
+	TEST_UNSUPPORTED(".short 0xf3ac,0x0f0b	@ usat16	pc, #12, r12")
+	TEST_UNSUPPORTED(".short 0xf3ad,0x0c0b	@ usat16	r12, #12, sp")
+	TEST_UNSUPPORTED(".short 0xf3af,0x0c0b	@ usat16	r12, #12, pc")
+
+	TEST_R(     "sbfx	r0, r",0  , VAL1,", #0, #31")
+	TEST_R(     "sbfx	r14, r",12, VAL2,", #8, #16")
+	TEST_R(     "sbfx	r4, r",10,  VAL1,", #16, #15")
+	TEST_UNSUPPORTED(".short 0xf34c,0x2d0f	@ sbfx	sp, r12, #8, #16")
+	TEST_UNSUPPORTED(".short 0xf34c,0x2f0f	@ sbfx	pc, r12, #8, #16")
+	TEST_UNSUPPORTED(".short 0xf34d,0x2c0f	@ sbfx	r12, sp, #8, #16")
+	TEST_UNSUPPORTED(".short 0xf34f,0x2c0f	@ sbfx	r12, pc, #8, #16")
+
+	TEST_R(     "ubfx	r0, r",0  , VAL1,", #0, #31")
+	TEST_R(     "ubfx	r14, r",12, VAL2,", #8, #16")
+	TEST_R(     "ubfx	r4, r",10,  VAL1,", #16, #15")
+	TEST_UNSUPPORTED(".short 0xf3cc,0x2d0f	@ ubfx	sp, r12, #8, #16")
+	TEST_UNSUPPORTED(".short 0xf3cc,0x2f0f	@ ubfx	pc, r12, #8, #16")
+	TEST_UNSUPPORTED(".short 0xf3cd,0x2c0f	@ ubfx	r12, sp, #8, #16")
+	TEST_UNSUPPORTED(".short 0xf3cf,0x2c0f	@ ubfx	r12, pc, #8, #16")
+
+	TEST_R(     "bfc	r",0, VAL1,", #4, #20")
+	TEST_R(     "bfc	r",14,VAL2,", #4, #20")
+	TEST_R(     "bfc	r",7, VAL1,", #0, #31")
+	TEST_R(     "bfc	r",8, VAL2,", #0, #31")
+	TEST_UNSUPPORTED(".short 0xf36f,0x0d1e	@ bfc	sp, #0, #31")
+	TEST_UNSUPPORTED(".short 0xf36f,0x0f1e	@ bfc	pc, #0, #31")
+
+	TEST_RR(    "bfi	r",0, VAL1,", r",0  , VAL2,", #0, #31")
+	TEST_RR(    "bfi	r",12,VAL1,", r",14 , VAL2,", #4, #20")
+	TEST_UNSUPPORTED(".short 0xf36e,0x1d17	@ bfi	sp, r14, #4, #20")
+	TEST_UNSUPPORTED(".short 0xf36e,0x1f17	@ bfi	pc, r14, #4, #20")
+	TEST_UNSUPPORTED(".short 0xf36d,0x1e17	@ bfi	r14, sp, #4, #20")
+
+	TEST_GROUP("Branches and miscellaneous control")
+
+CONDITION_INSTRUCTIONS(22,
+	TEST_BF("beq.w	2f")
+	TEST_BB("bne.w	2b")
+	TEST_BF("bgt.w	2f")
+	TEST_BB("blt.w	2b")
+	TEST_BF_X("bpl.w	2f", SPACE_0x1000)
+)
+
+	TEST_UNSUPPORTED("msr	cpsr, r0")
+	TEST_UNSUPPORTED("msr	cpsr_f, r1")
+	TEST_UNSUPPORTED("msr	spsr, r2")
+
+	TEST_UNSUPPORTED("cpsie.w	i")
+	TEST_UNSUPPORTED("cpsid.w	i")
+	TEST_UNSUPPORTED("cps	0x13")
+
+	TEST_SUPPORTED("yield.w")
+	TEST("sev.w")
+	TEST("nop.w")
+	TEST("wfi.w")
+	TEST_SUPPORTED("wfe.w")
+	TEST_UNSUPPORTED("dbg.w	#0")
+
+	TEST_UNSUPPORTED("clrex")
+	TEST_UNSUPPORTED("dsb")
+	TEST_UNSUPPORTED("dmb")
+	TEST_UNSUPPORTED("isb")
+
+	TEST_UNSUPPORTED("bxj	r0")
+
+	TEST_UNSUPPORTED("subs	pc, lr, #4")
+
+	TEST("mrs	r0, cpsr")
+	TEST("mrs	r14, cpsr")
+	TEST_UNSUPPORTED(".short 0xf3ef,0x8d00	@ mrs	sp, spsr")
+	TEST_UNSUPPORTED(".short 0xf3ef,0x8f00	@ mrs	pc, spsr")
+	TEST_UNSUPPORTED("mrs	r0, spsr")
+	TEST_UNSUPPORTED("mrs	lr, spsr")
+
+	TEST_UNSUPPORTED(".short 0xf7f0,0x8000 @ smc #0")
+
+	TEST_UNSUPPORTED(".short 0xf7f0,0xa000 @ undefeined")
+
+	TEST_BF(  "b.w	2f")
+	TEST_BB(  "b.w	2b")
+	TEST_BF_X("b.w	2f", SPACE_0x1000)
+
+	TEST_BF(  "bl.w	2f")
+	TEST_BB(  "bl.w	2b")
+	TEST_BB_X("bl.w	2b", SPACE_0x1000)
+
+	TEST_X(	"blx	__dummy_arm_subroutine",
+		".arm				\n\t"
+		".align				\n\t"
+		".type __dummy_arm_subroutine, %%function \n\t"
+		"__dummy_arm_subroutine:	\n\t"
+		"mov	r0, pc			\n\t"
+		"bx	lr			\n\t"
+		".thumb				\n\t"
+	)
+	TEST(	"blx	__dummy_arm_subroutine")
+
+	TEST_GROUP("Store single data item")
+
+#define SINGLE_STORE(size)							\
+	TEST_RP( "str"size"	r",0, VAL1,", [r",11,-1024,", #1024]")		\
+	TEST_RP( "str"size"	r",14,VAL2,", [r",1, -1024,", #1080]")		\
+	TEST_RP( "str"size"	r",0, VAL1,", [r",11,256,  ", #-120]")		\
+	TEST_RP( "str"size"	r",14,VAL2,", [r",1, 256,  ", #-128]")		\
+	TEST_RP( "str"size"	r",0, VAL1,", [r",11,24,  "], #120")		\
+	TEST_RP( "str"size"	r",14,VAL2,", [r",1, 24,  "], #128")		\
+	TEST_RP( "str"size"	r",0, VAL1,", [r",11,24,  "], #-120")		\
+	TEST_RP( "str"size"	r",14,VAL2,", [r",1, 24,  "], #-128")		\
+	TEST_RP( "str"size"	r",0, VAL1,", [r",11,24,   ", #120]!")		\
+	TEST_RP( "str"size"	r",14,VAL2,", [r",1, 24,   ", #128]!")		\
+	TEST_RP( "str"size"	r",0, VAL1,", [r",11,256,  ", #-120]!")		\
+	TEST_RP( "str"size"	r",14,VAL2,", [r",1, 256,  ", #-128]!")		\
+	TEST_RPR("str"size".w	r",0, VAL1,", [r",1, 0,", r",2, 4,"]")		\
+	TEST_RPR("str"size"	r",14,VAL2,", [r",10,0,", r",11,4,", lsl #1]")	\
+	TEST_R(  "str"size".w	r",7, VAL1,", [sp, #24]")			\
+	TEST_RP( "str"size".w	r",0, VAL2,", [r",0,0, "]")			\
+	TEST_UNSUPPORTED("str"size"t	r0, [r1, #4]")
+
+	SINGLE_STORE("b")
+	SINGLE_STORE("h")
+	SINGLE_STORE("")
+
+	TEST("str	sp, [sp]")
+	TEST_UNSUPPORTED(".short 0xf8cf,0xe000	@ str	r14, [pc]")
+	TEST_UNSUPPORTED(".short 0xf8ce,0xf000	@ str	pc, [r14]")
+
+	TEST_GROUP("Advanced SIMD element or structure load/store instructions")
+
+	TEST_UNSUPPORTED(".short 0xf900,0x0000")
+	TEST_UNSUPPORTED(".short 0xf92f,0xffff")
+	TEST_UNSUPPORTED(".short 0xf980,0x0000")
+	TEST_UNSUPPORTED(".short 0xf9ef,0xffff")
+
+	TEST_GROUP("Load single data item and memory hints")
+
+#define SINGLE_LOAD(size)						\
+	TEST_P( "ldr"size"	r0, [r",11,-1024, ", #1024]")		\
+	TEST_P( "ldr"size"	r14, [r",1, -1024,", #1080]")		\
+	TEST_P( "ldr"size"	r0, [r",11,256,   ", #-120]")		\
+	TEST_P( "ldr"size"	r14, [r",1, 256,  ", #-128]")		\
+	TEST_P( "ldr"size"	r0, [r",11,24,   "], #120")		\
+	TEST_P( "ldr"size"	r14, [r",1, 24,  "], #128")		\
+	TEST_P( "ldr"size"	r0, [r",11,24,   "], #-120")		\
+	TEST_P( "ldr"size"	r14, [r",1,24,   "], #-128")		\
+	TEST_P( "ldr"size"	r0, [r",11,24,    ", #120]!")		\
+	TEST_P( "ldr"size"	r14, [r",1, 24,   ", #128]!")		\
+	TEST_P( "ldr"size"	r0, [r",11,256,   ", #-120]!")		\
+	TEST_P( "ldr"size"	r14, [r",1, 256,  ", #-128]!")		\
+	TEST_PR("ldr"size".w	r0, [r",1, 0,", r",2, 4,"]")		\
+	TEST_PR("ldr"size"	r14, [r",10,0,", r",11,4,", lsl #1]")	\
+	TEST_X( "ldr"size".w	r0, 3f",				\
+		".align 3				\n\t"		\
+		"3:	.word	"__stringify(VAL1))			\
+	TEST_X( "ldr"size".w	r14, 3f",				\
+		".align 3				\n\t"		\
+		"3:	.word	"__stringify(VAL2))			\
+	TEST(   "ldr"size".w	r7, 3b")				\
+	TEST(   "ldr"size".w	r7, [sp, #24]")				\
+	TEST_P( "ldr"size".w	r0, [r",0,0, "]")			\
+	TEST_UNSUPPORTED("ldr"size"t	r0, [r1, #4]")
+
+	SINGLE_LOAD("b")
+	SINGLE_LOAD("sb")
+	SINGLE_LOAD("h")
+	SINGLE_LOAD("sh")
+	SINGLE_LOAD("")
+
+	TEST_BF_P("ldr	pc, [r",14, 15*4,"]")
+	TEST_P(   "ldr	sp, [r",14, 13*4,"]")
+	TEST_BF_R("ldr	pc, [sp, r",14, 15*4,"]")
+	TEST_R(   "ldr	sp, [sp, r",14, 13*4,"]")
+	TEST_THUMB_TO_ARM_INTERWORK_P("ldr	pc, [r",0,0,", #15*4]")
+	TEST_SUPPORTED("ldr	sp, 99f")
+	TEST_SUPPORTED("ldr	pc, 99f")
+
+	TEST_UNSUPPORTED(".short 0xf854,0x700d	@ ldr	r7, [r4, sp]")
+	TEST_UNSUPPORTED(".short 0xf854,0x700f	@ ldr	r7, [r4, pc]")
+	TEST_UNSUPPORTED(".short 0xf814,0x700d	@ ldrb	r7, [r4, sp]")
+	TEST_UNSUPPORTED(".short 0xf814,0x700f	@ ldrb	r7, [r4, pc]")
+	TEST_UNSUPPORTED(".short 0xf89f,0xd004	@ ldrb	sp, 99f")
+	TEST_UNSUPPORTED(".short 0xf814,0xd008	@ ldrb	sp, [r4, r8]")
+	TEST_UNSUPPORTED(".short 0xf894,0xd000	@ ldrb	sp, [r4]")
+
+	TEST_UNSUPPORTED(".short 0xf860,0x0000") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xf9ff,0xffff") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xf950,0x0000") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xf95f,0xffff") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xf800,0x0800") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xf97f,0xfaff") /* Unallocated space */
+
+	TEST(   "pli	[pc, #4]")
+	TEST(   "pli	[pc, #-4]")
+	TEST(   "pld	[pc, #4]")
+	TEST(   "pld	[pc, #-4]")
+
+	TEST_P( "pld	[r",0,-1024,", #1024]")
+	TEST(   ".short 0xf8b0,0xf400	@ pldw	[r0, #1024]")
+	TEST_P( "pli	[r",4, 0b,", #1024]")
+	TEST_P( "pld	[r",7, 120,", #-120]")
+	TEST(   ".short 0xf837,0xfc78	@ pldw	[r7, #-120]")
+	TEST_P( "pli	[r",11,120,", #-120]")
+	TEST(   "pld	[sp, #0]")
+
+	TEST_PR("pld	[r",7, 24, ", r",0, 16,"]")
+	TEST_PR("pld	[r",8, 24, ", r",12,16,", lsl #3]")
+	TEST_SUPPORTED(".short 0xf837,0xf000	@ pldw	[r7, r0]")
+	TEST_SUPPORTED(".short 0xf838,0xf03c	@ pldw	[r8, r12, lsl #3]");
+	TEST_RR("pli	[r",12,0b,", r",0, 16,"]")
+	TEST_RR("pli	[r",0, 0b,", r",12,16,", lsl #3]")
+	TEST_R( "pld	[sp, r",1, 16,"]")
+	TEST_UNSUPPORTED(".short 0xf817,0xf00d  @pld	[r7, sp]")
+	TEST_UNSUPPORTED(".short 0xf817,0xf00f  @pld	[r7, pc]")
+
+	TEST_GROUP("Data-processing (register)")
+
+#define SHIFTS32(op)					\
+	TEST_RR(op"	r0,  r",1, VAL1,", r",2, 3, "")	\
+	TEST_RR(op"	r14, r",12,VAL2,", r",11,10,"")
+
+	SHIFTS32("lsl")
+	SHIFTS32("lsls")
+	SHIFTS32("lsr")
+	SHIFTS32("lsrs")
+	SHIFTS32("asr")
+	SHIFTS32("asrs")
+	SHIFTS32("ror")
+	SHIFTS32("rors")
+
+	TEST_UNSUPPORTED(".short 0xfa01,0xff02	@ lsl	pc, r1, r2")
+	TEST_UNSUPPORTED(".short 0xfa01,0xfd02	@ lsl	sp, r1, r2")
+	TEST_UNSUPPORTED(".short 0xfa0f,0xf002	@ lsl	r0, pc, r2")
+	TEST_UNSUPPORTED(".short 0xfa0d,0xf002	@ lsl	r0, sp, r2")
+	TEST_UNSUPPORTED(".short 0xfa01,0xf00f	@ lsl	r0, r1, pc")
+	TEST_UNSUPPORTED(".short 0xfa01,0xf00d	@ lsl	r0, r1, sp")
+
+	TEST_RR(    "sxtah	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "sxtah	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "sxth	r8, r",7,  HH1,"")
+
+	TEST_UNSUPPORTED(".short 0xfa0f,0xff87	@ sxth	pc, r7");
+	TEST_UNSUPPORTED(".short 0xfa0f,0xfd87	@ sxth	sp, r7");
+	TEST_UNSUPPORTED(".short 0xfa0f,0xf88f	@ sxth	r8, pc");
+	TEST_UNSUPPORTED(".short 0xfa0f,0xf88d	@ sxth	r8, sp");
+
+	TEST_RR(    "uxtah	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uxtah	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "uxth	r8, r",7,  HH1,"")
+
+	TEST_RR(    "sxtab16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "sxtab16	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "sxtb16	r8, r",7,  HH1,"")
+
+	TEST_RR(    "uxtab16	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uxtab16	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "uxtb16	r8, r",7,  HH1,"")
+
+	TEST_RR(    "sxtab	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "sxtab	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "sxtb	r8, r",7,  HH1,"")
+
+	TEST_RR(    "uxtab	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "uxtab	r14,r",12, HH2,", r",10,HH1,", ror #8")
+	TEST_R(     "uxtb	r8, r",7,  HH1,"")
+
+	TEST_UNSUPPORTED(".short 0xfa60,0x00f0")
+	TEST_UNSUPPORTED(".short 0xfa7f,0xffff")
+
+#define PARALLEL_ADD_SUB(op)					\
+	TEST_RR(  op"add16	r0, r",0,  HH1,", r",1, HH2,"")	\
+	TEST_RR(  op"add16	r14, r",12,HH2,", r",10,HH1,"")	\
+	TEST_RR(  op"asx	r0, r",0,  HH1,", r",1, HH2,"")	\
+	TEST_RR(  op"asx	r14, r",12,HH2,", r",10,HH1,"")	\
+	TEST_RR(  op"sax	r0, r",0,  HH1,", r",1, HH2,"")	\
+	TEST_RR(  op"sax	r14, r",12,HH2,", r",10,HH1,"")	\
+	TEST_RR(  op"sub16	r0, r",0,  HH1,", r",1, HH2,"")	\
+	TEST_RR(  op"sub16	r14, r",12,HH2,", r",10,HH1,"")	\
+	TEST_RR(  op"add8	r0, r",0,  HH1,", r",1, HH2,"")	\
+	TEST_RR(  op"add8	r14, r",12,HH2,", r",10,HH1,"")	\
+	TEST_RR(  op"sub8	r0, r",0,  HH1,", r",1, HH2,"")	\
+	TEST_RR(  op"sub8	r14, r",12,HH2,", r",10,HH1,"")
+
+	TEST_GROUP("Parallel addition and subtraction, signed")
+
+	PARALLEL_ADD_SUB("s")
+	PARALLEL_ADD_SUB("q")
+	PARALLEL_ADD_SUB("sh")
+
+	TEST_GROUP("Parallel addition and subtraction, unsigned")
+
+	PARALLEL_ADD_SUB("u")
+	PARALLEL_ADD_SUB("uq")
+	PARALLEL_ADD_SUB("uh")
+
+	TEST_GROUP("Miscellaneous operations")
+
+	TEST_RR("qadd	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR("qadd	lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_RR("qsub	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR("qsub	lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_RR("qdadd	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR("qdadd	lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_RR("qdsub	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR("qdsub	lr, r",9, VAL2,", r",8, VAL1,"")
+
+	TEST_R("rev.w	r0, r",0,   VAL1,"")
+	TEST_R("rev	r14, r",12, VAL2,"")
+	TEST_R("rev16.w	r0, r",0,   VAL1,"")
+	TEST_R("rev16	r14, r",12, VAL2,"")
+	TEST_R("rbit	r0, r",0,   VAL1,"")
+	TEST_R("rbit	r14, r",12, VAL2,"")
+	TEST_R("revsh.w	r0, r",0,   VAL1,"")
+	TEST_R("revsh	r14, r",12, VAL2,"")
+
+	TEST_UNSUPPORTED(".short 0xfa9c,0xff8c	@ rev	pc, r12");
+	TEST_UNSUPPORTED(".short 0xfa9c,0xfd8c	@ rev	sp, r12");
+	TEST_UNSUPPORTED(".short 0xfa9f,0xfe8f	@ rev	r14, pc");
+	TEST_UNSUPPORTED(".short 0xfa9d,0xfe8d	@ rev	r14, sp");
+
+	TEST_RR("sel	r0, r",0,  VAL1,", r",1, VAL2,"")
+	TEST_RR("sel	r14, r",12,VAL1,", r",10, VAL2,"")
+
+	TEST_R("clz	r0, r",0, 0x0,"")
+	TEST_R("clz	r7, r",14,0x1,"")
+	TEST_R("clz	lr, r",7, 0xffffffff,"")
+
+	TEST_UNSUPPORTED(".short 0xfa80,0xf030") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xfab0,0xf000") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */
+
+	TEST_GROUP("Multiply, multiply accumulate, and absolute difference operations")
+
+	TEST_RR(    "mul	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "mul	r7, r",8, VAL2,", r",9, VAL2,"")
+	TEST_UNSUPPORTED(".short 0xfb08,0xff09	@ mul	pc, r8, r9")
+	TEST_UNSUPPORTED(".short 0xfb08,0xfd09	@ mul	sp, r8, r9")
+	TEST_UNSUPPORTED(".short 0xfb0f,0xf709	@ mul	r7, pc, r9")
+	TEST_UNSUPPORTED(".short 0xfb0d,0xf709	@ mul	r7, sp, r9")
+	TEST_UNSUPPORTED(".short 0xfb08,0xf70f	@ mul	r7, r8, pc")
+	TEST_UNSUPPORTED(".short 0xfb08,0xf70d	@ mul	r7, r8, sp")
+
+	TEST_RRR(   "mla	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(   "mla	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_UNSUPPORTED(".short 0xfb08,0xaf09	@ mla	pc, r8, r9, r10");
+	TEST_UNSUPPORTED(".short 0xfb08,0xad09	@ mla	sp, r8, r9, r10");
+	TEST_UNSUPPORTED(".short 0xfb0f,0xa709	@ mla	r7, pc, r9, r10");
+	TEST_UNSUPPORTED(".short 0xfb0d,0xa709	@ mla	r7, sp, r9, r10");
+	TEST_UNSUPPORTED(".short 0xfb08,0xa70f	@ mla	r7, r8, pc, r10");
+	TEST_UNSUPPORTED(".short 0xfb08,0xa70d	@ mla	r7, r8, sp, r10");
+	TEST_UNSUPPORTED(".short 0xfb08,0xd709	@ mla	r7, r8, r9, sp");
+
+	TEST_RRR(   "mls	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(   "mls	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+
+	TEST_RRR(   "smlabb	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(   "smlabb	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RRR(   "smlatb	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(   "smlatb	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RRR(   "smlabt	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(   "smlabt	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RRR(   "smlatt	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(   "smlatt	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(    "smulbb	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smulbb	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_RR(    "smultb	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smultb	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_RR(    "smulbt	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smulbt	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_RR(    "smultt	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smultt	r7, r",8, VAL3,", r",9, VAL1,"")
+
+	TEST_RRR(   "smlad	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
+	TEST_RRR(   "smlad	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+	TEST_RRR(   "smladx	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
+	TEST_RRR(   "smladx	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+	TEST_RR(    "smuad	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "smuad	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_RR(    "smuadx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "smuadx	r14, r",12,HH2,", r",10,HH1,"")
+
+	TEST_RRR(   "smlawb	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(   "smlawb	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RRR(   "smlawt	r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
+	TEST_RRR(   "smlawt	r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR(    "smulwb	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smulwb	r7, r",8, VAL3,", r",9, VAL1,"")
+	TEST_RR(    "smulwt	r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR(    "smulwt	r7, r",8, VAL3,", r",9, VAL1,"")
+
+	TEST_RRR(   "smlsd	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
+	TEST_RRR(   "smlsd	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+	TEST_RRR(   "smlsdx	r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
+	TEST_RRR(   "smlsdx	r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+	TEST_RR(    "smusd	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "smusd	r14, r",12,HH2,", r",10,HH1,"")
+	TEST_RR(    "smusdx	r0, r",0,  HH1,", r",1, HH2,"")
+	TEST_RR(    "smusdx	r14, r",12,HH2,", r",10,HH1,"")
+
+	TEST_RRR(   "smmla	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
+	TEST_RRR(   "smmla	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+	TEST_RRR(   "smmlar	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
+	TEST_RRR(   "smmlar	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+	TEST_RR(    "smmul	r0, r",0,  VAL1,", r",1, VAL2,"")
+	TEST_RR(    "smmul	r14, r",12,VAL2,", r",10,VAL1,"")
+	TEST_RR(    "smmulr	r0, r",0,  VAL1,", r",1, VAL2,"")
+	TEST_RR(    "smmulr	r14, r",12,VAL2,", r",10,VAL1,"")
+
+	TEST_RRR(   "smmls	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
+	TEST_RRR(   "smmls	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+	TEST_RRR(   "smmlsr	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
+	TEST_RRR(   "smmlsr	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+
+	TEST_RRR(   "usada8	r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL3,"")
+	TEST_RRR(   "usada8	r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
+	TEST_RR(    "usad8	r0, r",0,  VAL1,", r",1, VAL2,"")
+	TEST_RR(    "usad8	r14, r",12,VAL2,", r",10,VAL1,"")
+
+	TEST_UNSUPPORTED(".short 0xfb00,0xf010") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xfb0f,0xff1f") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xfb70,0xf010") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xfb70,0x0010") /* Unallocated space */
+	TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */
+
+	TEST_GROUP("Long multiply, long multiply accumulate, and divide")
+
+	TEST_RR(   "smull	r0, r1, r",2, VAL1,", r",3, VAL2,"")
+	TEST_RR(   "smull	r7, r8, r",9, VAL2,", r",10, VAL1,"")
+	TEST_UNSUPPORTED(".short 0xfb89,0xf80a	@ smull	pc, r8, r9, r10");
+	TEST_UNSUPPORTED(".short 0xfb89,0xd80a	@ smull	sp, r8, r9, r10");
+	TEST_UNSUPPORTED(".short 0xfb89,0x7f0a	@ smull	r7, pc, r9, r10");
+	TEST_UNSUPPORTED(".short 0xfb89,0x7d0a	@ smull	r7, sp, r9, r10");
+	TEST_UNSUPPORTED(".short 0xfb8f,0x780a	@ smull	r7, r8, pc, r10");
+	TEST_UNSUPPORTED(".short 0xfb8d,0x780a	@ smull	r7, r8, sp, r10");
+	TEST_UNSUPPORTED(".short 0xfb89,0x780f	@ smull	r7, r8, r9, pc");
+	TEST_UNSUPPORTED(".short 0xfb89,0x780d	@ smull	r7, r8, r9, sp");
+
+	TEST_RR(   "umull	r0, r1, r",2, VAL1,", r",3, VAL2,"")
+	TEST_RR(   "umull	r7, r8, r",9, VAL2,", r",10, VAL1,"")
+
+	TEST_RRRR( "smlal	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR( "smlal	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+
+	TEST_RRRR( "smlalbb	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR( "smlalbb	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRRR( "smlalbt	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR( "smlalbt	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRRR( "smlaltb	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR( "smlaltb	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRRR( "smlaltt	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR( "smlaltt	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+
+	TEST_RRRR( "smlald	r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+	TEST_RRRR( "smlald	r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+	TEST_RRRR( "smlaldx	r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+	TEST_RRRR( "smlaldx	r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+
+	TEST_RRRR( "smlsld	r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+	TEST_RRRR( "smlsld	r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+	TEST_RRRR( "smlsldx	r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+	TEST_RRRR( "smlsldx	r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+
+	TEST_RRRR( "umlal	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR( "umlal	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+	TEST_RRRR( "umaal	r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+	TEST_RRRR( "umaal	r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+
+	TEST_GROUP("Coprocessor instructions")
+
+	TEST_UNSUPPORTED(".short 0xfc00,0x0000")
+	TEST_UNSUPPORTED(".short 0xffff,0xffff")
+
+	TEST_GROUP("Testing instructions in IT blocks")
+
+	TEST_ITBLOCK("sub.w	r0, r0")
+
+	verbose("\n");
+}
+
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test.c
new file mode 100644
index 0000000..1862d8f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test.c
@@ -0,0 +1,1696 @@
+/*
+ * arch/arm/kernel/kprobes-test.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This file contains test code for ARM kprobes.
+ *
+ * The top level function run_all_tests() executes tests for all of the
+ * supported instruction sets: ARM, 16-bit Thumb, and 32-bit Thumb. These tests
+ * fall into two categories: run_api_tests() checks basic functionality of the
+ * kprobes API, and run_test_cases() is a comprehensive test for kprobes
+ * instruction decoding and simulation.
+ *
+ * run_test_cases() first checks the kprobes decoding table for self-consistency
+ * (using table_test()), then executes a series of test cases for each of the
+ * CPU instruction forms. coverage_start() and coverage_end() are used to verify
+ * that these test cases cover all of the possible combinations of instructions
+ * described by the kprobes decoding tables.
+ *
+ * The individual test cases are in kprobes-test-arm.c and kprobes-test-thumb.c
+ * which use the macros defined in kprobes-test.h. The rest of this
+ * documentation will describe the operation of the framework used by these
+ * test cases.
+ */
+
+/*
+ * TESTING METHODOLOGY
+ * -------------------
+ *
+ * The methodology used to test an ARM instruction 'test_insn' is to use
+ * inline assembler like:
+ *
+ * test_before: nop
+ * test_case:	test_insn
+ * test_after:	nop
+ *
+ * When the test case is run, a kprobe is placed on each nop. The
+ * post-handler of the test_before probe is used to modify the saved CPU
+ * register context to that which we require for the test case. The
+ * pre-handler of the test_after probe saves a copy of the CPU
+ * register context. In this way we can execute test_insn with a specific
+ * register context and see the results afterwards.
+ *
+ * To actually test the kprobes instruction emulation we perform the above
+ * step a second time but with an additional kprobe on the test_case
+ * instruction itself. If the emulation is accurate then the results seen
+ * by the test_after probe will be identical to the first run which didn't
+ * have a probe on test_case.
+ *
+ * Each test case is run several times with a variety of variations in the
+ * flags value stored in CPSR and, for Thumb code, different ITState.
+ *
+ * For instructions which can modify PC, a second test_after probe is used
+ * like this:
+ *
+ * test_before: nop
+ * test_case:	test_insn
+ * test_after:	nop
+ *		b test_done
+ * test_after2: nop
+ * test_done:
+ *
+ * The test case is constructed such that test_insn branches to
+ * test_after2, or, if testing a conditional instruction, it may just
+ * continue to test_after. The probes inserted at both locations let us
+ * determine which happened. A similar approach is used for testing
+ * backwards branches...
+ *
+ *		b test_before
+ *		b test_done  @ helps to cope with off by 1 branches
+ * test_after2: nop
+ *		b test_done
+ * test_before: nop
+ * test_case:	test_insn
+ * test_after:	nop
+ * test_done:
+ *
+ * The macros used to generate the assembler instructions described above
+ * are TEST_INSTRUCTION, TEST_BRANCH_F (branch forwards) and TEST_BRANCH_B
+ * (branch backwards). In these, the local labels numbered 50, 1, 2 and
+ * 99 represent: test_before, test_case, test_after2 and test_done.
+ *
+ * FRAMEWORK
+ * ---------
+ *
+ * Each test case is wrapped between the pair of macros TESTCASE_START and
+ * TESTCASE_END. As well as performing the inline assembler boilerplate,
+ * these call out to the kprobes_test_case_start() and
+ * kprobes_test_case_end() functions which drive the execution of the test
+ * case. The specific arguments to use for each test case are stored as
+ * inline data constructed using the various TEST_ARG_* macros. Putting
+ * this all together, a simple test case may look like:
+ *
+ *	TESTCASE_START("Testing mov r0, r7")
+ *	TEST_ARG_REG(7, 0x12345678) // Set r7=0x12345678
+ *	TEST_ARG_END("")
+ *	TEST_INSTRUCTION("mov r0, r7")
+ *	TESTCASE_END
+ *
+ * Note, in practice the single convenience macro TEST_R would be used for this
+ * instead.
+ *
+ * The above would expand to assembler looking something like:
+ *
+ *	@ TESTCASE_START
+ *	bl	__kprobes_test_case_start
+ *	@ start of inline data...
+ *	.ascii "mov r0, r7"	@ text title for test case
+ *	.byte	0
+ *	.align	2
+ *
+ *	@ TEST_ARG_REG
+ *	.byte	ARG_TYPE_REG
+ *	.byte	7
+ *	.short	0
+ *	.word	0x12345678
+ *
+ *	@ TEST_ARG_END
+ *	.byte	ARG_TYPE_END
+ *	.byte	TEST_ISA	@ flags, including ISA being tested
+ *	.short	50f-0f		@ offset of 'test_before'
+ *	.short	2f-0f		@ offset of 'test_after2' (if relevant)
+ *	.short	99f-0f		@ offset of 'test_done'
+ *	@ start of test case code...
+ *	0:
+ *	.code	TEST_ISA	@ switch to ISA being tested
+ *
+ *	@ TEST_INSTRUCTION
+ *	50:	nop		@ location for 'test_before' probe
+ *	1:	mov r0, r7	@ the test case instruction 'test_insn'
+ *		nop		@ location for 'test_after' probe
+ *
+ *	@ TESTCASE_END
+ *	2:
+ *	99:	bl __kprobes_test_case_end_##TEST_ISA
+ *	.code	NORMAL_ISA
+ *
+ * When the above is executed, the following happens...
+ *
+ * __kprobes_test_case_start() is an assembler wrapper which sets up space
+ * for a stack buffer and calls the C function kprobes_test_case_start().
+ * This C function does some initial processing of the inline data and
+ * sets up some global state. It then inserts the test_before and test_after
+ * kprobes and returns a value which causes the assembler wrapper to jump
+ * to the start of the test case code (local label '0').
+ *
+ * When the test case code executes, the test_before probe will be hit and
+ * test_before_post_handler will call setup_test_context(). This fills the
+ * stack buffer and CPU registers with a test pattern and then processes
+ * the test case arguments. In our example there is one TEST_ARG_REG which
+ * indicates that R7 should be loaded with the value 0x12345678.
+ *
+ * When the test_before probe ends, the test case continues and executes
+ * the "mov r0, r7" instruction. It then hits the test_after probe and the
+ * pre-handler for this (test_after_pre_handler) will save a copy of the
+ * CPU register context. This should now have R0 holding the same value as
+ * R7.
+ *
+ * Finally we get to the call to __kprobes_test_case_end_{32,16}. This is
+ * an assembler wrapper which switches back to the ISA used by the test
+ * code and calls the C function kprobes_test_case_end().
+ *
+ * For each run through the test case, test_case_run_count is incremented
+ * by one. For even runs, kprobes_test_case_end() saves a copy of the
+ * register and stack buffer contents from the test case just run. It then
+ * inserts a kprobe on the test case instruction 'test_insn' and returns a
+ * value to cause the test case code to be re-run.
+ *
+ * For odd numbered runs, kprobes_test_case_end() compares the register and
+ * stack buffer contents to those that were saved on the previous even
+ * numbered run (the one without the kprobe on test_insn). These should be
+ * the same if the kprobe instruction simulation routine is correct.
+ *
+ * The pair of test case runs is repeated with different combinations of
+ * flag values in CPSR and, for Thumb, different ITState. This is
+ * controlled by test_context_cpsr().
+ *
+ * BUILDING TEST CASES
+ * -------------------
+ *
+ * As an aid to building test cases, the stack buffer is initialised with
+ * some special values:
+ *
+ *   [SP+13*4]	Contains SP+120. This can be used to test instructions
+ *		which load a value into SP.
+ *
+ *   [SP+15*4]	When testing branching instructions using TEST_BRANCH_{F,B},
+ *		this holds the target address of the branch, 'test_after2'.
+ *		This can be used to test instructions which load a PC value
+ *		from memory.
+ */
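+
+/*
+ * As an illustration of the special stack values above (a sketch based
+ * on the test cases in kprobes-test-thumb.c), an instruction which loads
+ * SP from memory can be tested with:
+ *
+ *	TEST_P("ldr	sp, [r",14, 13*4,"]")
+ *
+ * TEST_P points r14 at offset 13*4 into the stack buffer, so the value
+ * loaded into SP is the SP+120 pattern that setup_test_context() placed
+ * there.
+ */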
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kprobes.h>
+
+#include <asm/opcodes.h>
+
+#include "kprobes.h"
+#include "kprobes-test.h"
+
+
+#define BENCHMARKING	1
+
+
+/*
+ * Test basic API
+ */
+
+static bool test_regs_ok;
+static int test_func_instance;
+static int pre_handler_called;
+static int post_handler_called;
+static int jprobe_func_called;
+static int kretprobe_handler_called;
+
+#define FUNC_ARG1 0x12345678
+#define FUNC_ARG2 0xabcdef
+
+
+#ifndef CONFIG_THUMB2_KERNEL
+
+long arm_func(long r0, long r1);
+
+static void __used __naked __arm_kprobes_test_func(void)
+{
+	__asm__ __volatile__ (
+		".arm					\n\t"
+		".type arm_func, %%function		\n\t"
+		"arm_func:				\n\t"
+		"adds	r0, r0, r1			\n\t"
+		"bx	lr				\n\t"
+		".code "NORMAL_ISA	 /* Back to Thumb if necessary */
+		: : : "r0", "r1", "cc"
+	);
+}
+
+#else /* CONFIG_THUMB2_KERNEL */
+
+long thumb16_func(long r0, long r1);
+long thumb32even_func(long r0, long r1);
+long thumb32odd_func(long r0, long r1);
+
+static void __used __naked __thumb_kprobes_test_funcs(void)
+{
+	__asm__ __volatile__ (
+		".type thumb16_func, %%function		\n\t"
+		"thumb16_func:				\n\t"
+		"adds.n	r0, r0, r1			\n\t"
+		"bx	lr				\n\t"
+
+		".align					\n\t"
+		".type thumb32even_func, %%function	\n\t"
+		"thumb32even_func:			\n\t"
+		"adds.w	r0, r0, r1			\n\t"
+		"bx	lr				\n\t"
+
+		".align					\n\t"
+		"nop.n					\n\t"
+		".type thumb32odd_func, %%function	\n\t"
+		"thumb32odd_func:			\n\t"
+		"adds.w	r0, r0, r1			\n\t"
+		"bx	lr				\n\t"
+
+		: : : "r0", "r1", "cc"
+	);
+}
+
+#endif /* CONFIG_THUMB2_KERNEL */
+
+
+static int call_test_func(long (*func)(long, long), bool check_test_regs)
+{
+	long ret;
+
+	++test_func_instance;
+	test_regs_ok = false;
+
+	ret = (*func)(FUNC_ARG1, FUNC_ARG2);
+	if (ret != FUNC_ARG1 + FUNC_ARG2) {
+		pr_err("FAIL: call_test_func: func returned %lx\n", ret);
+		return false;
+	}
+
+	if (check_test_regs && !test_regs_ok) {
+		pr_err("FAIL: test regs not OK\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int __kprobes pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	pre_handler_called = test_func_instance;
+	if (regs->ARM_r0 == FUNC_ARG1 && regs->ARM_r1 == FUNC_ARG2)
+		test_regs_ok = true;
+	return 0;
+}
+
+static void __kprobes post_handler(struct kprobe *p, struct pt_regs *regs,
+				unsigned long flags)
+{
+	post_handler_called = test_func_instance;
+	if (regs->ARM_r0 != FUNC_ARG1 + FUNC_ARG2 || regs->ARM_r1 != FUNC_ARG2)
+		test_regs_ok = false;
+}
+
+static struct kprobe the_kprobe = {
+	.addr		= 0,
+	.pre_handler	= pre_handler,
+	.post_handler	= post_handler
+};
+
+static int test_kprobe(long (*func)(long, long))
+{
+	int ret;
+
+	the_kprobe.addr = (kprobe_opcode_t *)func;
+	ret = register_kprobe(&the_kprobe);
+	if (ret < 0) {
+		pr_err("FAIL: register_kprobe failed with %d\n", ret);
+		return ret;
+	}
+
+	ret = call_test_func(func, true);
+
+	unregister_kprobe(&the_kprobe);
+	the_kprobe.flags = 0; /* Clear disable flag to allow reuse */
+
+	if (!ret)
+		return -EINVAL;
+	if (pre_handler_called != test_func_instance) {
+		pr_err("FAIL: kprobe pre_handler not called\n");
+		return -EINVAL;
+	}
+	if (post_handler_called != test_func_instance) {
+		pr_err("FAIL: kprobe post_handler not called\n");
+		return -EINVAL;
+	}
+	if (!call_test_func(func, false))
+		return -EINVAL;
+	if (pre_handler_called == test_func_instance ||
+				post_handler_called == test_func_instance) {
+		pr_err("FAIL: probe called after unregistering\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void __kprobes jprobe_func(long r0, long r1)
+{
+	jprobe_func_called = test_func_instance;
+	if (r0 == FUNC_ARG1 && r1 == FUNC_ARG2)
+		test_regs_ok = true;
+	jprobe_return();
+}
+
+static struct jprobe the_jprobe = {
+	.entry		= jprobe_func,
+};
+
+static int test_jprobe(long (*func)(long, long))
+{
+	int ret;
+
+	the_jprobe.kp.addr = (kprobe_opcode_t *)func;
+	ret = register_jprobe(&the_jprobe);
+	if (ret < 0) {
+		pr_err("FAIL: register_jprobe failed with %d\n", ret);
+		return ret;
+	}
+
+	ret = call_test_func(func, true);
+
+	unregister_jprobe(&the_jprobe);
+	the_jprobe.kp.flags = 0; /* Clear disable flag to allow reuse */
+
+	if (!ret)
+		return -EINVAL;
+	if (jprobe_func_called != test_func_instance) {
+		pr_err("FAIL: jprobe handler function not called\n");
+		return -EINVAL;
+	}
+	if (!call_test_func(func, false))
+		return -EINVAL;
+	if (jprobe_func_called == test_func_instance) {
+		pr_err("FAIL: probe called after unregistering\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __kprobes
+kretprobe_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+	kretprobe_handler_called = test_func_instance;
+	if (regs_return_value(regs) == FUNC_ARG1 + FUNC_ARG2)
+		test_regs_ok = true;
+	return 0;
+}
+
+static struct kretprobe the_kretprobe = {
+	.handler	= kretprobe_handler,
+};
+
+static int test_kretprobe(long (*func)(long, long))
+{
+	int ret;
+
+	the_kretprobe.kp.addr = (kprobe_opcode_t *)func;
+	ret = register_kretprobe(&the_kretprobe);
+	if (ret < 0) {
+		pr_err("FAIL: register_kretprobe failed with %d\n", ret);
+		return ret;
+	}
+
+	ret = call_test_func(func, true);
+
+	unregister_kretprobe(&the_kretprobe);
+	the_kretprobe.kp.flags = 0; /* Clear disable flag to allow reuse */
+
+	if (!ret)
+		return -EINVAL;
+	if (kretprobe_handler_called != test_func_instance) {
+		pr_err("FAIL: kretprobe handler not called\n");
+		return -EINVAL;
+	}
+	if (!call_test_func(func, false))
+		return -EINVAL;
+	if (kretprobe_handler_called == test_func_instance) {
+		pr_err("FAIL: kretprobe called after unregistering\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int run_api_tests(long (*func)(long, long))
+{
+	int ret;
+
+	pr_info("    kprobe\n");
+	ret = test_kprobe(func);
+	if (ret < 0)
+		return ret;
+
+	pr_info("    jprobe\n");
+	ret = test_jprobe(func);
+	if (ret < 0)
+		return ret;
+
+	pr_info("    kretprobe\n");
+	ret = test_kretprobe(func);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+
+/*
+ * Benchmarking
+ */
+
+#if BENCHMARKING
+
+static void __naked benchmark_nop(void)
+{
+	__asm__ __volatile__ (
+		"nop		\n\t"
+		"bx	lr"
+	);
+}
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define wide ".w"
+#else
+#define wide
+#endif
+
+static void __naked benchmark_pushpop1(void)
+{
+	__asm__ __volatile__ (
+		"stmdb"wide"	sp!, {r3-r11,lr}  \n\t"
+		"ldmia"wide"	sp!, {r3-r11,pc}"
+	);
+}
+
+static void __naked benchmark_pushpop2(void)
+{
+	__asm__ __volatile__ (
+		"stmdb"wide"	sp!, {r0-r8,lr}  \n\t"
+		"ldmia"wide"	sp!, {r0-r8,pc}"
+	);
+}
+
+static void __naked benchmark_pushpop3(void)
+{
+	__asm__ __volatile__ (
+		"stmdb"wide"	sp!, {r4,lr}  \n\t"
+		"ldmia"wide"	sp!, {r4,pc}"
+	);
+}
+
+static void __naked benchmark_pushpop4(void)
+{
+	__asm__ __volatile__ (
+		"stmdb"wide"	sp!, {r0,lr}  \n\t"
+		"ldmia"wide"	sp!, {r0,pc}"
+	);
+}
+
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+static void __naked benchmark_pushpop_thumb(void)
+{
+	__asm__ __volatile__ (
+		"push.n	{r0-r7,lr}  \n\t"
+		"pop.n	{r0-r7,pc}"
+	);
+}
+
+#endif
+
+static int __kprobes
+benchmark_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	return 0;
+}
+
+static int benchmark(void(*fn)(void))
+{
+	unsigned n, i, t, t0;
+
+	for (n = 1000; ; n *= 2) {
+		t0 = sched_clock();
+		for (i = n; i > 0; --i)
+			fn();
+		t = sched_clock() - t0;
+		if (t >= 250000000)
+			break; /* Stop once we took more than 0.25 seconds */
+	}
+	return t / n; /* Time for one iteration in nanoseconds */
+}
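+
+/*
+ * A worked example of the loop above (numbers purely illustrative): if
+ * n has doubled to 1024000 by the time the elapsed time t first exceeds
+ * 250000000ns, the result is t / 1024000, i.e. a few hundred
+ * nanoseconds per call to fn().
+ */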
+
+static int kprobe_benchmark(void(*fn)(void), unsigned offset)
+{
+	struct kprobe k = {
+		.addr		= (kprobe_opcode_t *)((uintptr_t)fn + offset),
+		.pre_handler	= benchmark_pre_handler,
+	};
+
+	int ret = register_kprobe(&k);
+	if (ret < 0) {
+		pr_err("FAIL: register_kprobe failed with %d\n", ret);
+		return ret;
+	}
+
+	ret = benchmark(fn);
+
+	unregister_kprobe(&k);
+	return ret;
+}
+
+struct benchmarks {
+	void		(*fn)(void);
+	unsigned	offset;
+	const char	*title;
+};
+
+static int run_benchmarks(void)
+{
+	int ret;
+	struct benchmarks list[] = {
+		{&benchmark_nop, 0, "nop"},
+		/*
+		 * benchmark_pushpop{1,3} will have the optimised
+		 * instruction emulation, whilst benchmark_pushpop{2,4} will
+		 * be the equivalent unoptimised instructions.
+		 */
+		{&benchmark_pushpop1, 0, "stmdb	sp!, {r3-r11,lr}"},
+		{&benchmark_pushpop1, 4, "ldmia	sp!, {r3-r11,pc}"},
+		{&benchmark_pushpop2, 0, "stmdb	sp!, {r0-r8,lr}"},
+		{&benchmark_pushpop2, 4, "ldmia	sp!, {r0-r8,pc}"},
+		{&benchmark_pushpop3, 0, "stmdb	sp!, {r4,lr}"},
+		{&benchmark_pushpop3, 4, "ldmia	sp!, {r4,pc}"},
+		{&benchmark_pushpop4, 0, "stmdb	sp!, {r0,lr}"},
+		{&benchmark_pushpop4, 4, "ldmia	sp!, {r0,pc}"},
+#ifdef CONFIG_THUMB2_KERNEL
+		{&benchmark_pushpop_thumb, 0, "push.n	{r0-r7,lr}"},
+		{&benchmark_pushpop_thumb, 2, "pop.n	{r0-r7,pc}"},
+#endif
+		{0}
+	};
+
+	struct benchmarks *b;
+	for (b = list; b->fn; ++b) {
+		ret = kprobe_benchmark(b->fn, b->offset);
+		if (ret < 0)
+			return ret;
+		pr_info("    %dns for kprobe %s\n", ret, b->title);
+	}
+
+	pr_info("\n");
+	return 0;
+}
+
+#endif /* BENCHMARKING */
+
+
+/*
+ * Decoding table self-consistency tests
+ */
+
+static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
+	[DECODE_TYPE_TABLE]	= sizeof(struct decode_table),
+	[DECODE_TYPE_CUSTOM]	= sizeof(struct decode_custom),
+	[DECODE_TYPE_SIMULATE]	= sizeof(struct decode_simulate),
+	[DECODE_TYPE_EMULATE]	= sizeof(struct decode_emulate),
+	[DECODE_TYPE_OR]	= sizeof(struct decode_or),
+	[DECODE_TYPE_REJECT]	= sizeof(struct decode_reject)
+};
+
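+/*
+ * Walk a kprobes decoding table, calling fn(header, args) for each entry.
+ * Entries are variable-sized, so decode_struct_sizes[] above is used to
+ * step from one decode_header to the next until an entry of type
+ * DECODE_TYPE_END ends the walk.
+ */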
+static int table_iter(const union decode_item *table,
+			int (*fn)(const struct decode_header *, void *),
+			void *args)
+{
+	const struct decode_header *h = (struct decode_header *)table;
+	int result;
+
+	for (;;) {
+		enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+
+		if (type == DECODE_TYPE_END)
+			return 0;
+
+		result = fn(h, args);
+		if (result)
+			return result;
+
+		h = (struct decode_header *)
+			((uintptr_t)h + decode_struct_sizes[type]);
+
+	}
+}
+
+static int table_test_fail(const struct decode_header *h, const char *message)
+{
+	pr_err("FAIL: kprobes test failure \"%s\" (mask %08x, value %08x)\n",
+					message, h->mask.bits, h->value.bits);
+	return -EINVAL;
+}
+
+struct table_test_args {
+	const union decode_item *root_table;
+	u32			parent_mask;
+	u32			parent_value;
+};
+
+static int table_test_fn(const struct decode_header *h, void *args)
+{
+	struct table_test_args *a = (struct table_test_args *)args;
+	enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+
+	if (h->value.bits & ~h->mask.bits)
+		return table_test_fail(h, "Match value has bits not in mask");
+
+	if ((h->mask.bits & a->parent_mask) != a->parent_mask)
+		return table_test_fail(h, "Mask has bits not in parent mask");
+
+	if ((h->value.bits ^ a->parent_value) & a->parent_mask)
+		return table_test_fail(h, "Value is inconsistent with parent");
+
+	if (type == DECODE_TYPE_TABLE) {
+		struct decode_table *d = (struct decode_table *)h;
+		struct table_test_args args2 = *a;
+		args2.parent_mask = h->mask.bits;
+		args2.parent_value = h->value.bits;
+		return table_iter(d->table.table, table_test_fn, &args2);
+	}
+
+	return 0;
+}
+
+static int table_test(const union decode_item *table)
+{
+	struct table_test_args args = {
+		.root_table	= table,
+		.parent_mask	= 0,
+		.parent_value	= 0
+	};
+	return table_iter(args.root_table, table_test_fn, &args);
+}
+
+
+/*
+ * Decoding table test coverage analysis
+ *
+ * coverage_start() builds a coverage_table which contains a list of
+ * coverage_entry's to match each entry in the specified kprobes instruction
+ * decoding table.
+ *
+ * When test cases are run, coverage_add() is called to process each case.
+ * This looks up the corresponding entry in the coverage_table and sets it as
+ * being matched, as well as clearing the regs flag appropriate for the test.
+ *
+ * After all test cases have been run, coverage_end() is called to check that
+ * all entries in coverage_table have been matched and that all regs flags are
+ * cleared. I.e. that all possible combinations of instructions described by
+ * the kprobes decoding tables have had a test case executed for them.
+ */
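+
+/*
+ * Typical usage, as a sketch (run_test_cases() is the real driver):
+ *
+ *	coverage_start(table);	// build coverage_table for a decoding table
+ *	// ...execute test cases, each calling coverage_add(insn)...
+ *	coverage_end();		// report any entries left unmatched
+ */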
+
+bool coverage_fail;
+
+#define MAX_COVERAGE_ENTRIES 256
+
+struct coverage_entry {
+	const struct decode_header	*header;
+	unsigned			regs;
+	unsigned			nesting;
+	char				matched;
+};
+
+struct coverage_table {
+	struct coverage_entry	*base;
+	unsigned		num_entries;
+	unsigned		nesting;
+};
+
+struct coverage_table coverage;
+
+#define COVERAGE_ANY_REG	(1<<0)
+#define COVERAGE_SP		(1<<1)
+#define COVERAGE_PC		(1<<2)
+#define COVERAGE_PCWB		(1<<3)
+
+static const char coverage_register_lookup[16] = {
+	[REG_TYPE_ANY]		= COVERAGE_ANY_REG | COVERAGE_SP | COVERAGE_PC,
+	[REG_TYPE_SAMEAS16]	= COVERAGE_ANY_REG,
+	[REG_TYPE_SP]		= COVERAGE_SP,
+	[REG_TYPE_PC]		= COVERAGE_PC,
+	[REG_TYPE_NOSP]		= COVERAGE_ANY_REG | COVERAGE_SP,
+	[REG_TYPE_NOSPPC]	= COVERAGE_ANY_REG | COVERAGE_SP | COVERAGE_PC,
+	[REG_TYPE_NOPC]		= COVERAGE_ANY_REG | COVERAGE_PC,
+	[REG_TYPE_NOPCWB]	= COVERAGE_ANY_REG | COVERAGE_PC | COVERAGE_PCWB,
+	[REG_TYPE_NOPCX]	= COVERAGE_ANY_REG,
+	[REG_TYPE_NOSPPCX]	= COVERAGE_ANY_REG | COVERAGE_SP,
+};
+
+unsigned coverage_start_registers(const struct decode_header *h)
+{
+	unsigned regs = 0;
+	int i;
+	for (i = 0; i < 20; i += 4) {
+		int r = (h->type_regs.bits >> (DECODE_TYPE_BITS + i)) & 0xf;
+		regs |= coverage_register_lookup[r] << i;
+	}
+	return regs;
+}
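+
+/*
+ * Worked example: if a decode entry declares REG_TYPE_NOSPPC for the
+ * register field in bits 0-3 of the instruction, the lookup above yields
+ * COVERAGE_ANY_REG | COVERAGE_SP | COVERAGE_PC in the low nibble of the
+ * returned mask. Test cases using r0-r12, sp or pc in that field clear
+ * the corresponding flag in coverage_add_registers(); any flag still set
+ * when coverage_end() runs indicates missing test coverage.
+ */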
+
+static int coverage_start_fn(const struct decode_header *h, void *args)
+{
+	struct coverage_table *coverage = (struct coverage_table *)args;
+	enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+	struct coverage_entry *entry = coverage->base + coverage->num_entries;
+
+	if (coverage->num_entries == MAX_COVERAGE_ENTRIES - 1) {
+		pr_err("FAIL: Out of space for test coverage data");
+		return -ENOMEM;
+	}
+
+	++coverage->num_entries;
+
+	entry->header = h;
+	entry->regs = coverage_start_registers(h);
+	entry->nesting = coverage->nesting;
+	entry->matched = false;
+
+	if (type == DECODE_TYPE_TABLE) {
+		struct decode_table *d = (struct decode_table *)h;
+		int ret;
+		++coverage->nesting;
+		ret = table_iter(d->table.table, coverage_start_fn, coverage);
+		--coverage->nesting;
+		return ret;
+	}
+
+	return 0;
+}
+
+static int coverage_start(const union decode_item *table)
+{
+	coverage.base = kmalloc(MAX_COVERAGE_ENTRIES *
+				sizeof(struct coverage_entry), GFP_KERNEL);
+	if (!coverage.base)
+		return -ENOMEM;
+	coverage.num_entries = 0;
+	coverage.nesting = 0;
+	return table_iter(table, coverage_start_fn, &coverage);
+}
+
+static void
+coverage_add_registers(struct coverage_entry *entry, kprobe_opcode_t insn)
+{
+	int regs = entry->header->type_regs.bits >> DECODE_TYPE_BITS;
+	int i;
+	for (i = 0; i < 20; i += 4) {
+		enum decode_reg_type reg_type = (regs >> i) & 0xf;
+		int reg = (insn >> i) & 0xf;
+		int flag;
+
+		if (!reg_type)
+			continue;
+
+		if (reg == 13)
+			flag = COVERAGE_SP;
+		else if (reg == 15)
+			flag = COVERAGE_PC;
+		else
+			flag = COVERAGE_ANY_REG;
+		entry->regs &= ~(flag << i);
+
+		switch (reg_type) {
+
+		case REG_TYPE_NONE:
+		case REG_TYPE_ANY:
+		case REG_TYPE_SAMEAS16:
+			break;
+
+		case REG_TYPE_SP:
+			if (reg != 13)
+				return;
+			break;
+
+		case REG_TYPE_PC:
+			if (reg != 15)
+				return;
+			break;
+
+		case REG_TYPE_NOSP:
+			if (reg == 13)
+				return;
+			break;
+
+		case REG_TYPE_NOSPPC:
+		case REG_TYPE_NOSPPCX:
+			if (reg == 13 || reg == 15)
+				return;
+			break;
+
+		case REG_TYPE_NOPCWB:
+			if (!is_writeback(insn))
+				break;
+			if (reg == 15) {
+				entry->regs &= ~(COVERAGE_PCWB << i);
+				return;
+			}
+			break;
+
+		case REG_TYPE_NOPC:
+		case REG_TYPE_NOPCX:
+			if (reg == 15)
+				return;
+			break;
+		}
+
+	}
+}
+
+static void coverage_add(kprobe_opcode_t insn)
+{
+	struct coverage_entry *entry = coverage.base;
+	struct coverage_entry *end = coverage.base + coverage.num_entries;
+	bool matched = false;
+	unsigned nesting = 0;
+
+	for (; entry < end; ++entry) {
+		const struct decode_header *h = entry->header;
+		enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+
+		if (entry->nesting > nesting)
+			continue; /* Skip sub-table we didn't match */
+
+		if (entry->nesting < nesting)
+			break; /* End of sub-table we were scanning */
+
+		if (!matched) {
+			if ((insn & h->mask.bits) != h->value.bits)
+				continue;
+			entry->matched = true;
+		}
+
+		switch (type) {
+
+		case DECODE_TYPE_TABLE:
+			++nesting;
+			break;
+
+		case DECODE_TYPE_CUSTOM:
+		case DECODE_TYPE_SIMULATE:
+		case DECODE_TYPE_EMULATE:
+			coverage_add_registers(entry, insn);
+			return;
+
+		case DECODE_TYPE_OR:
+			matched = true;
+			break;
+
+		case DECODE_TYPE_REJECT:
+		default:
+			return;
+		}
+
+	}
+}
+
+static void coverage_end(void)
+{
+	struct coverage_entry *entry = coverage.base;
+	struct coverage_entry *end = coverage.base + coverage.num_entries;
+
+	for (; entry < end; ++entry) {
+		u32 mask = entry->header->mask.bits;
+		u32 value = entry->header->value.bits;
+
+		if (entry->regs) {
+			pr_err("FAIL: Register test coverage missing for %08x %08x (%05x)\n",
+				mask, value, entry->regs);
+			coverage_fail = true;
+		}
+		if (!entry->matched) {
+			pr_err("FAIL: Test coverage entry missing for %08x %08x\n",
+				mask, value);
+			coverage_fail = true;
+		}
+	}
+
+	kfree(coverage.base);
+}
+
+
+/*
+ * Framework for instruction set test cases
+ */
+
+void __naked __kprobes_test_case_start(void)
+{
+	__asm__ __volatile__ (
+		"stmdb	sp!, {r4-r11}				\n\t"
+		"sub	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+		"bic	r0, lr, #1  @ r0 = inline title string	\n\t"
+		"mov	r1, sp					\n\t"
+		"bl	kprobes_test_case_start			\n\t"
+		"bx	r0					\n\t"
+	);
+}
+
+#ifndef CONFIG_THUMB2_KERNEL
+
+void __naked __kprobes_test_case_end_32(void)
+{
+	__asm__ __volatile__ (
+		"mov	r4, lr					\n\t"
+		"bl	kprobes_test_case_end			\n\t"
+		"cmp	r0, #0					\n\t"
+		"movne	pc, r0					\n\t"
+		"mov	r0, r4					\n\t"
+		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+		"ldmia	sp!, {r4-r11}				\n\t"
+		"mov	pc, r0					\n\t"
+	);
+}
+
+#else /* CONFIG_THUMB2_KERNEL */
+
+void __naked __kprobes_test_case_end_16(void)
+{
+	__asm__ __volatile__ (
+		"mov	r4, lr					\n\t"
+		"bl	kprobes_test_case_end			\n\t"
+		"cmp	r0, #0					\n\t"
+		"bxne	r0					\n\t"
+		"mov	r0, r4					\n\t"
+		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+		"ldmia	sp!, {r4-r11}				\n\t"
+		"bx	r0					\n\t"
+	);
+}
+
+void __naked __kprobes_test_case_end_32(void)
+{
+	__asm__ __volatile__ (
+		".arm						\n\t"
+		"orr	lr, lr, #1  @ will return to Thumb code	\n\t"
+		"ldr	pc, 1f					\n\t"
+		"1:						\n\t"
+		".word	__kprobes_test_case_end_16		\n\t"
+	);
+}
+
+#endif
+
+
+int kprobe_test_flags;
+int kprobe_test_cc_position;
+
+static int test_try_count;
+static int test_pass_count;
+static int test_fail_count;
+
+static struct pt_regs initial_regs;
+static struct pt_regs expected_regs;
+static struct pt_regs result_regs;
+
+static u32 expected_memory[TEST_MEMORY_SIZE/sizeof(u32)];
+
+static const char *current_title;
+static struct test_arg *current_args;
+static u32 *current_stack;
+static uintptr_t current_branch_target;
+
+static uintptr_t current_code_start;
+static kprobe_opcode_t current_instruction;
+
+
+#define TEST_CASE_PASSED -1
+#define TEST_CASE_FAILED -2
+
+static int test_case_run_count;
+static bool test_case_is_thumb;
+static int test_instance;
+
+/*
+ * We ignore the state of the imprecise abort disable flag (CPSR.A) because this
+ * can change randomly as the kernel doesn't take care to preserve or initialise
+ * this across context switches. Also, with Security Extensions, the flag may
+ * not be under control of the kernel; for this reason we ignore the state of
+ * the FIQ disable flag CPSR.F as well.
+ */
+#define PSR_IGNORE_BITS (PSR_A_BIT | PSR_F_BIT)
+
+static unsigned long test_check_cc(int cc, unsigned long cpsr)
+{
+	int ret = arm_check_condition(cc << 28, cpsr);
+
+	return (ret != ARM_OPCODE_CONDTEST_FAIL);
+}
+
+static int is_last_scenario;
+static int probe_should_run; /* 0 = no, 1 = yes, -1 = unknown */
+static int memory_needs_checking;
+
+static unsigned long test_context_cpsr(int scenario)
+{
+	unsigned long cpsr;
+
+	probe_should_run = 1;
+
+	/* Default case is that we cycle through 16 combinations of flags */
+	cpsr  = (scenario & 0xf) << 28; /* N,Z,C,V flags */
+	cpsr |= (scenario & 0xf) << 16; /* GE flags */
+	cpsr |= (scenario & 0x1) << 27; /* Toggle Q flag */
+
+	if (!test_case_is_thumb) {
+		/* Testing ARM code */
+		int cc = current_instruction >> 28;
+
+		probe_should_run = test_check_cc(cc, cpsr) != 0;
+		if (scenario == 15)
+			is_last_scenario = true;
+
+	} else if (kprobe_test_flags & TEST_FLAG_NO_ITBLOCK) {
+		/* Testing Thumb code without setting ITSTATE */
+		if (kprobe_test_cc_position) {
+			int cc = (current_instruction >> kprobe_test_cc_position) & 0xf;
+			probe_should_run = test_check_cc(cc, cpsr) != 0;
+		}
+
+		if (scenario == 15)
+			is_last_scenario = true;
+
+	} else if (kprobe_test_flags & TEST_FLAG_FULL_ITBLOCK) {
+		/* Testing Thumb code with all combinations of ITSTATE */
+		unsigned x = (scenario >> 4);
+		unsigned cond_base = x % 7; /* ITSTATE<7:5> */
+		unsigned mask = x / 7 + 2;  /* ITSTATE<4:0>, bits reversed */
+
+		if (mask > 0x1f) {
+			/* Finish by testing state from instruction 'itt al' */
+			cond_base = 7;
+			mask = 0x4;
+			if ((scenario & 0xf) == 0xf)
+				is_last_scenario = true;
+		}
+
+		cpsr |= cond_base << 13;	/* ITSTATE<7:5> */
+		cpsr |= (mask & 0x1) << 12;	/* ITSTATE<4> */
+		cpsr |= (mask & 0x2) << 10;	/* ITSTATE<3> */
+		cpsr |= (mask & 0x4) << 8;	/* ITSTATE<2> */
+		cpsr |= (mask & 0x8) << 23;	/* ITSTATE<1> */
+		cpsr |= (mask & 0x10) << 21;	/* ITSTATE<0> */
+
+		probe_should_run = test_check_cc((cpsr >> 12) & 0xf, cpsr) != 0;
+
+	} else {
+		/* Testing Thumb code with several combinations of ITSTATE */
+		switch (scenario) {
+		case 16: /* Clear NZCV flags and 'it eq' state (false as Z=0) */
+			cpsr = 0x00000800;
+			probe_should_run = 0;
+			break;
+		case 17: /* Set NZCV flags and 'it vc' state (false as V=1) */
+			cpsr = 0xf0007800;
+			probe_should_run = 0;
+			break;
+		case 18: /* Clear NZCV flags and 'it ls' state (true as C=0) */
+			cpsr = 0x00009800;
+			break;
+		case 19: /* Set NZCV flags and 'it cs' state (true as C=1) */
+			cpsr = 0xf0002800;
+			is_last_scenario = true;
+			break;
+		}
+	}
+
+	return cpsr;
+}
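+
+/*
+ * Worked example for the default (ARM) path above: scenario 5 (0b0101)
+ * gives NZCV = 0101 (Z and V set), GE = 0101, and Q set since bit 0 of
+ * the scenario is 1. An instruction conditional on 'ne' (cc = 0001)
+ * would then get probe_should_run = 0, because Z is set.
+ */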
+
+static void setup_test_context(struct pt_regs *regs)
+{
+	int scenario = test_case_run_count>>1;
+	unsigned long val;
+	struct test_arg *args;
+	int i;
+
+	is_last_scenario = false;
+	memory_needs_checking = false;
+
+	/* Initialise test memory on stack */
+	val = (scenario & 1) ? VALM : ~VALM;
+	for (i = 0; i < TEST_MEMORY_SIZE / sizeof(current_stack[0]); ++i)
+		current_stack[i] = val + (i << 8);
+	/* Put target of branch on stack for tests which load PC from memory */
+	if (current_branch_target)
+		current_stack[15] = current_branch_target;
+	/* Put a value for SP on stack for tests which load SP from memory */
+	current_stack[13] = (u32)current_stack + 120;
+
+	/* Initialise register values to their default state */
+	val = (scenario & 2) ? VALR : ~VALR;
+	for (i = 0; i < 13; ++i)
+		regs->uregs[i] = val ^ (i << 8);
+	regs->ARM_lr = val ^ (14 << 8);
+	regs->ARM_cpsr &= ~(APSR_MASK | PSR_IT_MASK);
+	regs->ARM_cpsr |= test_context_cpsr(scenario);
+
+	/* Perform test-case-specific register setup */
+	args = current_args;
+	for (; args[0].type != ARG_TYPE_END; ++args)
+		switch (args[0].type) {
+		case ARG_TYPE_REG: {
+			struct test_arg_regptr *arg =
+				(struct test_arg_regptr *)args;
+			regs->uregs[arg->reg] = arg->val;
+			break;
+		}
+		case ARG_TYPE_PTR: {
+			struct test_arg_regptr *arg =
+				(struct test_arg_regptr *)args;
+			regs->uregs[arg->reg] =
+				(unsigned long)current_stack + arg->val;
+			memory_needs_checking = true;
+			break;
+		}
+		case ARG_TYPE_MEM: {
+			struct test_arg_mem *arg = (struct test_arg_mem *)args;
+			current_stack[arg->index] = arg->val;
+			break;
+		}
+		default:
+			break;
+		}
+}
+
+struct test_probe {
+	struct kprobe	kprobe;
+	bool		registered;
+	int		hit;
+};
+
+static void unregister_test_probe(struct test_probe *probe)
+{
+	if (probe->registered) {
+		unregister_kprobe(&probe->kprobe);
+		probe->kprobe.flags = 0; /* Clear disable flag to allow reuse */
+	}
+	probe->registered = false;
+}
+
+static int register_test_probe(struct test_probe *probe)
+{
+	int ret;
+
+	if (probe->registered)
+		BUG();
+
+	ret = register_kprobe(&probe->kprobe);
+	if (ret >= 0) {
+		probe->registered = true;
+		probe->hit = -1;
+	}
+	return ret;
+}
+
+static int __kprobes
+test_before_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	container_of(p, struct test_probe, kprobe)->hit = test_instance;
+	return 0;
+}
+
+static void __kprobes
+test_before_post_handler(struct kprobe *p, struct pt_regs *regs,
+							unsigned long flags)
+{
+	setup_test_context(regs);
+	initial_regs = *regs;
+	initial_regs.ARM_cpsr &= ~PSR_IGNORE_BITS;
+}
+
+static int __kprobes
+test_case_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	container_of(p, struct test_probe, kprobe)->hit = test_instance;
+	return 0;
+}
+
+static int __kprobes
+test_after_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	if (container_of(p, struct test_probe, kprobe)->hit == test_instance)
+		return 0; /* Already run for this test instance */
+
+	result_regs = *regs;
+	result_regs.ARM_cpsr &= ~PSR_IGNORE_BITS;
+
+	/* Undo any changes done to SP by the test case */
+	regs->ARM_sp = (unsigned long)current_stack;
+
+	container_of(p, struct test_probe, kprobe)->hit = test_instance;
+	return 0;
+}
+
+static struct test_probe test_before_probe = {
+	.kprobe.pre_handler	= test_before_pre_handler,
+	.kprobe.post_handler	= test_before_post_handler,
+};
+
+static struct test_probe test_case_probe = {
+	.kprobe.pre_handler	= test_case_pre_handler,
+};
+
+static struct test_probe test_after_probe = {
+	.kprobe.pre_handler	= test_after_pre_handler,
+};
+
+static struct test_probe test_after2_probe = {
+	.kprobe.pre_handler	= test_after_pre_handler,
+};
+
+static void test_case_cleanup(void)
+{
+	unregister_test_probe(&test_before_probe);
+	unregister_test_probe(&test_case_probe);
+	unregister_test_probe(&test_after_probe);
+	unregister_test_probe(&test_after2_probe);
+}
+
+static void print_registers(struct pt_regs *regs)
+{
+	pr_err("r0  %08lx | r1  %08lx | r2  %08lx | r3  %08lx\n",
+		regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+	pr_err("r4  %08lx | r5  %08lx | r6  %08lx | r7  %08lx\n",
+		regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
+	pr_err("r8  %08lx | r9  %08lx | r10 %08lx | r11 %08lx\n",
+		regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp);
+	pr_err("r12 %08lx | sp  %08lx | lr  %08lx | pc  %08lx\n",
+		regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc);
+	pr_err("cpsr %08lx\n", regs->ARM_cpsr);
+}
+
+static void print_memory(u32 *mem, size_t size)
+{
+	int i;
+	for (i = 0; i < size / sizeof(u32); i += 4)
+		pr_err("%08x %08x %08x %08x\n", mem[i], mem[i+1],
+						mem[i+2], mem[i+3]);
+}
+
+static size_t expected_memory_size(u32 *sp)
+{
+	size_t size = sizeof(expected_memory);
+	int offset = (uintptr_t)sp - (uintptr_t)current_stack;
+	if (offset > 0)
+		size -= offset;
+	return size;
+}
+
+static void test_case_failed(const char *message)
+{
+	test_case_cleanup();
+
+	pr_err("FAIL: %s\n", message);
+	pr_err("FAIL: Test %s\n", current_title);
+	pr_err("FAIL: Scenario %d\n", test_case_run_count >> 1);
+}
+
+static unsigned long next_instruction(unsigned long pc)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+	if ((pc & 1) && !is_wide_instruction(*(u16 *)(pc - 1)))
+		return pc + 2;
+	else
+#endif
+	return pc + 4;
+}
+
+static uintptr_t __used kprobes_test_case_start(const char *title, void *stack)
+{
+	struct test_arg *args;
+	struct test_arg_end *end_arg;
+	unsigned long test_code;
+
+	args = (struct test_arg *)PTR_ALIGN(title + strlen(title) + 1, 4);
+
+	current_title = title;
+	current_args = args;
+	current_stack = stack;
+
+	++test_try_count;
+
+	while (args->type != ARG_TYPE_END)
+		++args;
+	end_arg = (struct test_arg_end *)args;
+
+	test_code = (unsigned long)(args + 1); /* Code starts after args */
+
+	test_case_is_thumb = end_arg->flags & ARG_FLAG_THUMB;
+	if (test_case_is_thumb)
+		test_code |= 1;
+
+	current_code_start = test_code;
+
+	current_branch_target = 0;
+	if (end_arg->branch_offset != end_arg->end_offset)
+		current_branch_target = test_code + end_arg->branch_offset;
+
+	test_code += end_arg->code_offset;
+	test_before_probe.kprobe.addr = (kprobe_opcode_t *)test_code;
+
+	test_code = next_instruction(test_code);
+	test_case_probe.kprobe.addr = (kprobe_opcode_t *)test_code;
+
+	if (test_case_is_thumb) {
+		u16 *p = (u16 *)(test_code & ~1);
+		current_instruction = p[0];
+		if (is_wide_instruction(current_instruction)) {
+			current_instruction <<= 16;
+			current_instruction |= p[1];
+		}
+	} else {
+		current_instruction = *(u32 *)test_code;
+	}
+
+	if (current_title[0] == '.')
+		verbose("%s\n", current_title);
+	else
+		verbose("%s\t@ %0*x\n", current_title,
+					test_case_is_thumb ? 4 : 8,
+					current_instruction);
+
+	test_code = next_instruction(test_code);
+	test_after_probe.kprobe.addr = (kprobe_opcode_t *)test_code;
+
+	if (kprobe_test_flags & TEST_FLAG_NARROW_INSTR) {
+		if (!test_case_is_thumb ||
+		    is_wide_instruction(current_instruction)) {
+			test_case_failed("expected 16-bit instruction");
+			goto fail;
+		}
+	} else {
+		if (test_case_is_thumb &&
+		    !is_wide_instruction(current_instruction)) {
+			test_case_failed("expected 32-bit instruction");
+			goto fail;
+		}
+	}
+
+	coverage_add(current_instruction);
+
+	if (end_arg->flags & ARG_FLAG_UNSUPPORTED) {
+		if (register_test_probe(&test_case_probe) < 0)
+			goto pass;
+		test_case_failed("registered probe for unsupported instruction");
+		goto fail;
+	}
+
+	if (end_arg->flags & ARG_FLAG_SUPPORTED) {
+		if (register_test_probe(&test_case_probe) >= 0)
+			goto pass;
+		test_case_failed("couldn't register probe for supported instruction");
+		goto fail;
+	}
+
+	if (register_test_probe(&test_before_probe) < 0) {
+		test_case_failed("register test_before_probe failed");
+		goto fail;
+	}
+	if (register_test_probe(&test_after_probe) < 0) {
+		test_case_failed("register test_after_probe failed");
+		goto fail;
+	}
+	if (current_branch_target) {
+		test_after2_probe.kprobe.addr =
+				(kprobe_opcode_t *)current_branch_target;
+		if (register_test_probe(&test_after2_probe) < 0) {
+			test_case_failed("register test_after2_probe failed");
+			goto fail;
+		}
+	}
+
+	/* Start first run of test case */
+	test_case_run_count = 0;
+	++test_instance;
+	return current_code_start;
+pass:
+	test_case_run_count = TEST_CASE_PASSED;
+	return (uintptr_t)test_after_probe.kprobe.addr;
+fail:
+	test_case_run_count = TEST_CASE_FAILED;
+	return (uintptr_t)test_after_probe.kprobe.addr;
+}
+
+static bool check_test_results(void)
+{
+	size_t mem_size = 0;
+	u32 *mem = NULL;
+
+	if (memcmp(&expected_regs, &result_regs, sizeof(expected_regs))) {
+		test_case_failed("registers differ");
+		goto fail;
+	}
+
+	if (memory_needs_checking) {
+		mem = (u32 *)result_regs.ARM_sp;
+		mem_size = expected_memory_size(mem);
+		if (memcmp(expected_memory, mem, mem_size)) {
+			test_case_failed("test memory differs");
+			goto fail;
+		}
+	}
+
+	return true;
+
+fail:
+	pr_err("initial_regs:\n");
+	print_registers(&initial_regs);
+	pr_err("expected_regs:\n");
+	print_registers(&expected_regs);
+	pr_err("result_regs:\n");
+	print_registers(&result_regs);
+
+	if (mem) {
+		pr_err("current_stack=%p\n", current_stack);
+		pr_err("expected_memory:\n");
+		print_memory(expected_memory, mem_size);
+		pr_err("result_memory:\n");
+		print_memory(mem, mem_size);
+	}
+
+	return false;
+}
+
+static uintptr_t __used kprobes_test_case_end(void)
+{
+	if (test_case_run_count < 0) {
+		if (test_case_run_count == TEST_CASE_PASSED)
+			/* kprobes_test_case_start did all the needed testing */
+			goto pass;
+		else
+			/* kprobes_test_case_start failed */
+			goto fail;
+	}
+
+	if (test_before_probe.hit != test_instance) {
+		test_case_failed("test_before_handler not run");
+		goto fail;
+	}
+
+	if (test_after_probe.hit != test_instance &&
+				test_after2_probe.hit != test_instance) {
+		test_case_failed("test_after_handler not run");
+		goto fail;
+	}
+
+	/*
+	 * Even numbered test runs are performed without a probe on the test
+	 * case instruction so we can gather reference results. The subsequent
+	 * odd numbered run will have the probe inserted.
+	 */
+	if ((test_case_run_count & 1) == 0) {
+		/* Save results from run without probe */
+		u32 *mem = (u32 *)result_regs.ARM_sp;
+		expected_regs = result_regs;
+		memcpy(expected_memory, mem, expected_memory_size(mem));
+
+		/* Insert probe onto test case instruction */
+		if (register_test_probe(&test_case_probe) < 0) {
+			test_case_failed("register test_case_probe failed");
+			goto fail;
+		}
+	} else {
+		/* Check probe ran as expected */
+		if (probe_should_run == 1) {
+			if (test_case_probe.hit != test_instance) {
+				test_case_failed("test_case_handler not run");
+				goto fail;
+			}
+		} else if (probe_should_run == 0) {
+			if (test_case_probe.hit == test_instance) {
+				test_case_failed("test_case_handler ran");
+				goto fail;
+			}
+		}
+
+		/* Remove probe for any subsequent reference run */
+		unregister_test_probe(&test_case_probe);
+
+		if (!check_test_results())
+			goto fail;
+
+		if (is_last_scenario)
+			goto pass;
+	}
+
+	/* Do next test run */
+	++test_case_run_count;
+	++test_instance;
+	return current_code_start;
+fail:
+	++test_fail_count;
+	goto end;
+pass:
+	++test_pass_count;
+end:
+	test_case_cleanup();
+	return 0;
+}
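+
+/*
+ * To summarise the run sequencing: each test scenario executes as a pair
+ * of runs, an even numbered reference run without a probe on the test
+ * case instruction followed by an odd numbered probed run whose results
+ * are compared against the reference.
+ */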
+
+
+/*
+ * Top level test functions
+ */
+
+static int run_test_cases(void (*tests)(void), const union decode_item *table)
+{
+	int ret;
+
+	pr_info("    Check decoding tables\n");
+	ret = table_test(table);
+	if (ret)
+		return ret;
+
+	pr_info("    Run test cases\n");
+	ret = coverage_start(table);
+	if (ret)
+		return ret;
+
+	tests();
+
+	coverage_end();
+	return 0;
+}
+
+
+static int __init run_all_tests(void)
+{
+	int ret = 0;
+
+	pr_info("Begining kprobe tests...\n");
+
+#ifndef CONFIG_THUMB2_KERNEL
+
+	pr_info("Probe ARM code\n");
+	ret = run_api_tests(arm_func);
+	if (ret)
+		goto out;
+
+	pr_info("ARM instruction simulation\n");
+	ret = run_test_cases(kprobe_arm_test_cases, kprobe_decode_arm_table);
+	if (ret)
+		goto out;
+
+#else /* CONFIG_THUMB2_KERNEL */
+
+	pr_info("Probe 16-bit Thumb code\n");
+	ret = run_api_tests(thumb16_func);
+	if (ret)
+		goto out;
+
+	pr_info("Probe 32-bit Thumb code, even halfword\n");
+	ret = run_api_tests(thumb32even_func);
+	if (ret)
+		goto out;
+
+	pr_info("Probe 32-bit Thumb code, odd halfword\n");
+	ret = run_api_tests(thumb32odd_func);
+	if (ret)
+		goto out;
+
+	pr_info("16-bit Thumb instruction simulation\n");
+	ret = run_test_cases(kprobe_thumb16_test_cases,
+				kprobe_decode_thumb16_table);
+	if (ret)
+		goto out;
+
+	pr_info("32-bit Thumb instruction simulation\n");
+	ret = run_test_cases(kprobe_thumb32_test_cases,
+				kprobe_decode_thumb32_table);
+	if (ret)
+		goto out;
+#endif
+
+	pr_info("Total instruction simulation tests=%d, pass=%d fail=%d\n",
+		test_try_count, test_pass_count, test_fail_count);
+	if (test_fail_count) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+#if BENCHMARKING
+	pr_info("Benchmarks\n");
+	ret = run_benchmarks();
+	if (ret)
+		goto out;
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+	/* We are able to run all test cases so coverage should be complete */
+	if (coverage_fail) {
+		pr_err("FAIL: Test coverage checks failed\n");
+		ret = -EINVAL;
+		goto out;
+	}
+#endif
+
+out:
+	if (ret == 0)
+		pr_info("Finished kprobe tests OK\n");
+	else
+		pr_err("kprobe tests failed\n");
+
+	return ret;
+}
+
+
+/*
+ * Module setup
+ */
+
+#ifdef MODULE
+
+static void __exit kprobe_test_exit(void)
+{
+}
+
+module_init(run_all_tests)
+module_exit(kprobe_test_exit)
+MODULE_LICENSE("GPL");
+
+#else /* !MODULE */
+
+late_initcall(run_all_tests);
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test.h b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test.h
new file mode 100644
index 0000000..e28a869
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-test.h
@@ -0,0 +1,432 @@
+/*
+ * arch/arm/kernel/kprobes-test.h
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define VERBOSE 0 /* Set to '1' for more logging of test cases */
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define NORMAL_ISA "16"
+#else
+#define NORMAL_ISA "32"
+#endif
+
+
+/* Flags used in kprobe_test_flags */
+#define TEST_FLAG_NO_ITBLOCK	(1<<0)
+#define TEST_FLAG_FULL_ITBLOCK	(1<<1)
+#define TEST_FLAG_NARROW_INSTR	(1<<2)
+
+extern int kprobe_test_flags;
+extern int kprobe_test_cc_position;
+
+
+#define TEST_MEMORY_SIZE 256
+
+
+/*
+ * Test case structures.
+ *
+ * Each argument given to a test case has one of three types.
+ *
+ *   ARG_TYPE_REG
+ *	Load a register with the given value.
+ *
+ *   ARG_TYPE_PTR
+ *	Load a register with a pointer into the stack buffer (SP + given value).
+ *
+ *   ARG_TYPE_MEM
+ *	Store the given value into the stack buffer at [SP+index].
+ *
+ */
+
+#define	ARG_TYPE_END	0
+#define	ARG_TYPE_REG	1
+#define	ARG_TYPE_PTR	2
+#define	ARG_TYPE_MEM	3
+
+#define ARG_FLAG_UNSUPPORTED	0x01
+#define ARG_FLAG_SUPPORTED	0x02
+#define ARG_FLAG_THUMB		0x10	/* Must be 16 so TEST_ISA can be used */
+#define ARG_FLAG_ARM		0x20	/* Must be 32 so TEST_ISA can be used */
+
+struct test_arg {
+	u8	type;		/* ARG_TYPE_x */
+	u8	_padding[7];
+};
+
+struct test_arg_regptr {
+	u8	type;		/* ARG_TYPE_REG or ARG_TYPE_PTR */
+	u8	reg;
+	u8	_padding[2];
+	u32	val;
+};
+
+struct test_arg_mem {
+	u8	type;		/* ARG_TYPE_MEM */
+	u8	index;
+	u8	_padding[2];
+	u32	val;
+};
+
+struct test_arg_end {
+	u8	type;		/* ARG_TYPE_END */
+	u8	flags;		/* ARG_FLAG_x */
+	u16	code_offset;
+	u16	branch_offset;
+	u16	end_offset;
+};
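+
+/*
+ * As a sketch of the resulting layout (values hypothetical), a test case
+ * taking a single register argument is encoded in the instruction stream
+ * as the byte sequence for
+ *
+ *	{ .type = ARG_TYPE_REG, .reg = 1, .val = VAL1 }
+ *	{ .type = ARG_TYPE_END, ... }
+ *
+ * followed immediately by the code under test; kprobes_test_case_start()
+ * walks the list until it finds the ARG_TYPE_END entry.
+ */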
+
+
+/*
+ * Building blocks for test cases.
+ *
+ * Each test case is wrapped between TESTCASE_START and TESTCASE_END.
+ *
+ * To specify arguments for a test case the TEST_ARG_{REG,PTR,MEM} macros are
+ * used followed by a terminating TEST_ARG_END.
+ *
+ * After this, the instruction to be tested is defined with TEST_INSTRUCTION.
+ * Or for branches, TEST_BRANCH_B and TEST_BRANCH_F (branch forwards/backwards).
+ *
+ * Some specific test cases may make use of other custom constructs.
+ */
+
+#if VERBOSE
+#define verbose(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
+#else
+#define verbose(fmt, ...)
+#endif
+
+#define TEST_GROUP(title)					\
+	verbose("\n");						\
+	verbose(title"\n");					\
+	verbose("---------------------------------------------------------\n");
+
+#define TESTCASE_START(title)					\
+	__asm__ __volatile__ (					\
+	"bl	__kprobes_test_case_start		\n\t"	\
+	/* don't use .asciz here as 'title' may be */		\
+	/* multiple strings to be concatenated.  */		\
+	".ascii "#title"				\n\t"	\
+	".byte	0					\n\t"	\
+	".align	2					\n\t"
+
+#define	TEST_ARG_REG(reg, val)					\
+	".byte	"__stringify(ARG_TYPE_REG)"		\n\t"	\
+	".byte	"#reg"					\n\t"	\
+	".short	0					\n\t"	\
+	".word	"#val"					\n\t"
+
+#define	TEST_ARG_PTR(reg, val)					\
+	".byte	"__stringify(ARG_TYPE_PTR)"		\n\t"	\
+	".byte	"#reg"					\n\t"	\
+	".short	0					\n\t"	\
+	".word	"#val"					\n\t"
+
+#define	TEST_ARG_MEM(index, val)				\
+	".byte	"__stringify(ARG_TYPE_MEM)"		\n\t"	\
+	".byte	"#index"				\n\t"	\
+	".short	0					\n\t"	\
+	".word	"#val"					\n\t"
+
+#define	TEST_ARG_END(flags)					\
+	".byte	"__stringify(ARG_TYPE_END)"		\n\t"	\
+	".byte	"TEST_ISA flags"			\n\t"	\
+	".short	50f-0f					\n\t"	\
+	".short	2f-0f					\n\t"	\
+	".short	99f-0f					\n\t"	\
+	".code "TEST_ISA"				\n\t"	\
+	"0:						\n\t"
+
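+/*
+ * The local assembler labels used by these macros correspond to the offsets
+ * stored by TEST_ARG_END: 0: marks the end of the argument list, 50: the
+ * first instruction executed (code_offset), 2: the branch target of
+ * branching test cases (branch_offset), and 99: the end of the test case
+ * (end_offset).
+ */
+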
+#define TEST_INSTRUCTION(instruction)				\
+	"50:	nop					\n\t"	\
+	"1:	"instruction"				\n\t"	\
+	"	nop					\n\t"
+
+#define TEST_BRANCH_F(instruction)				\
+	TEST_INSTRUCTION(instruction)				\
+	"	b	99f				\n\t"	\
+	"2:	nop					\n\t"
+
+#define TEST_BRANCH_B(instruction)				\
+	"	b	50f				\n\t"	\
+	"	b	99f				\n\t"	\
+	"2:	nop					\n\t"	\
+	"	b	99f				\n\t"	\
+	TEST_INSTRUCTION(instruction)
+
+#define TEST_BRANCH_FX(instruction, codex)			\
+	TEST_INSTRUCTION(instruction)				\
+	"	b	99f				\n\t"	\
+	codex"						\n\t"	\
+	"	b	99f				\n\t"	\
+	"2:	nop					\n\t"
+
+#define TEST_BRANCH_BX(instruction, codex)			\
+	"	b	50f				\n\t"	\
+	"	b	99f				\n\t"	\
+	"2:	nop					\n\t"	\
+	"	b	99f				\n\t"	\
+	codex"						\n\t"	\
+	TEST_INSTRUCTION(instruction)
+
+#define TESTCASE_END						\
+	"2:						\n\t"	\
+	"99:						\n\t"	\
+	"	bl __kprobes_test_case_end_"TEST_ISA"	\n\t"	\
+	".code "NORMAL_ISA"				\n\t"	\
+	: :							\
+	: "r0", "r1", "r2", "r3", "ip", "lr", "memory", "cc"	\
+	);
+
+
+/*
+ * Macros to define test cases.
+ *
+ * Those of the form TEST_{R,P,M}* can be used to define test cases
+ * which take combinations of the three basic types of arguments. E.g.
+ *
+ *   TEST_R	One register argument
+ *   TEST_RR	Two register arguments
+ *   TEST_RPR	A register, a pointer, then a register argument
+ *
+ * For testing instructions which may branch, there are macros TEST_BF_*
+ * and TEST_BB_* for branching forwards and backwards.
+ *
+ * TEST_SUPPORTED and TEST_UNSUPPORTED don't cause the code to be executed;
+ * they just verify that a kprobe is or is not allowed on the given instruction.
+ */
+
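+/*
+ * As a sketch (register and value chosen arbitrarily), a one-register
+ * test case could be written
+ *
+ *	TEST_R("mov	r0, r", 1, VAL1, "")
+ *
+ * which loads r1 with VAL1, then executes "mov r0, r1" both with and
+ * without a probe inserted and compares the results.
+ */
+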
+#define TEST(code)				\
+	TESTCASE_START(code)			\
+	TEST_ARG_END("")			\
+	TEST_INSTRUCTION(code)			\
+	TESTCASE_END
+
+#define TEST_UNSUPPORTED(code)					\
+	TESTCASE_START(code)					\
+	TEST_ARG_END("|"__stringify(ARG_FLAG_UNSUPPORTED))	\
+	TEST_INSTRUCTION(code)					\
+	TESTCASE_END
+
+#define TEST_SUPPORTED(code)					\
+	TESTCASE_START(code)					\
+	TEST_ARG_END("|"__stringify(ARG_FLAG_SUPPORTED))	\
+	TEST_INSTRUCTION(code)					\
+	TESTCASE_END
+
+#define TEST_R(code1, reg, val, code2)			\
+	TESTCASE_START(code1 #reg code2)		\
+	TEST_ARG_REG(reg, val)				\
+	TEST_ARG_END("")				\
+	TEST_INSTRUCTION(code1 #reg code2)		\
+	TESTCASE_END
+
+#define TEST_RR(code1, reg1, val1, code2, reg2, val2, code3)	\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3)		\
+	TEST_ARG_REG(reg1, val1)				\
+	TEST_ARG_REG(reg2, val2)				\
+	TEST_ARG_END("")					\
+	TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3)		\
+	TESTCASE_END
+
+#define TEST_RRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4)		\
+	TEST_ARG_REG(reg1, val1)						\
+	TEST_ARG_REG(reg2, val2)						\
+	TEST_ARG_REG(reg3, val3)						\
+	TEST_ARG_END("")							\
+	TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4)		\
+	TESTCASE_END
+
+#define TEST_RRRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4, reg4, val4)	\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4)		\
+	TEST_ARG_REG(reg1, val1)						\
+	TEST_ARG_REG(reg2, val2)						\
+	TEST_ARG_REG(reg3, val3)						\
+	TEST_ARG_REG(reg4, val4)						\
+	TEST_ARG_END("")							\
+	TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4)	\
+	TESTCASE_END
+
+#define TEST_P(code1, reg1, val1, code2)	\
+	TESTCASE_START(code1 #reg1 code2)	\
+	TEST_ARG_PTR(reg1, val1)		\
+	TEST_ARG_END("")			\
+	TEST_INSTRUCTION(code1 #reg1 code2)	\
+	TESTCASE_END
+
+#define TEST_PR(code1, reg1, val1, code2, reg2, val2, code3)	\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3)		\
+	TEST_ARG_PTR(reg1, val1)				\
+	TEST_ARG_REG(reg2, val2)				\
+	TEST_ARG_END("")					\
+	TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3)		\
+	TESTCASE_END
+
+#define TEST_RP(code1, reg1, val1, code2, reg2, val2, code3)	\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3)		\
+	TEST_ARG_REG(reg1, val1)				\
+	TEST_ARG_PTR(reg2, val2)				\
+	TEST_ARG_END("")					\
+	TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3)		\
+	TESTCASE_END
+
+#define TEST_PRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4)		\
+	TEST_ARG_PTR(reg1, val1)						\
+	TEST_ARG_REG(reg2, val2)						\
+	TEST_ARG_REG(reg3, val3)						\
+	TEST_ARG_END("")							\
+	TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4)		\
+	TESTCASE_END
+
+#define TEST_RPR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4)		\
+	TEST_ARG_REG(reg1, val1)						\
+	TEST_ARG_PTR(reg2, val2)						\
+	TEST_ARG_REG(reg3, val3)						\
+	TEST_ARG_END("")							\
+	TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4)		\
+	TESTCASE_END
+
+#define TEST_RRP(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4)		\
+	TEST_ARG_REG(reg1, val1)						\
+	TEST_ARG_REG(reg2, val2)						\
+	TEST_ARG_PTR(reg3, val3)						\
+	TEST_ARG_END("")							\
+	TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4)		\
+	TESTCASE_END
+
+#define TEST_BF_P(code1, reg1, val1, code2)	\
+	TESTCASE_START(code1 #reg1 code2)	\
+	TEST_ARG_PTR(reg1, val1)		\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_F(code1 #reg1 code2)	\
+	TESTCASE_END
+
+#define TEST_BF(code)				\
+	TESTCASE_START(code)			\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_F(code)			\
+	TESTCASE_END
+
+#define TEST_BB(code)				\
+	TESTCASE_START(code)			\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_B(code)			\
+	TESTCASE_END
+
+#define TEST_BF_R(code1, reg, val, code2)	\
+	TESTCASE_START(code1 #reg code2)	\
+	TEST_ARG_REG(reg, val)			\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_F(code1 #reg code2)		\
+	TESTCASE_END
+
+#define TEST_BB_R(code1, reg, val, code2)	\
+	TESTCASE_START(code1 #reg code2)	\
+	TEST_ARG_REG(reg, val)			\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_B(code1 #reg code2)		\
+	TESTCASE_END
+
+#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3)	\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3)		\
+	TEST_ARG_REG(reg1, val1)				\
+	TEST_ARG_REG(reg2, val2)				\
+	TEST_ARG_END("")					\
+	TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3)		\
+	TESTCASE_END
+
+#define TEST_BF_X(code, codex)			\
+	TESTCASE_START(code)			\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_FX(code, codex)		\
+	TESTCASE_END
+
+#define TEST_BB_X(code, codex)			\
+	TESTCASE_START(code)			\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_BX(code, codex)		\
+	TESTCASE_END
+
+#define TEST_BF_RX(code1, reg, val, code2, codex)	\
+	TESTCASE_START(code1 #reg code2)		\
+	TEST_ARG_REG(reg, val)				\
+	TEST_ARG_END("")				\
+	TEST_BRANCH_FX(code1 #reg code2, codex)		\
+	TESTCASE_END
+
+#define TEST_X(code, codex)			\
+	TESTCASE_START(code)			\
+	TEST_ARG_END("")			\
+	TEST_INSTRUCTION(code)			\
+	"	b	99f		\n\t"	\
+	"	"codex"			\n\t"	\
+	TESTCASE_END
+
+#define TEST_RX(code1, reg, val, code2, codex)		\
+	TESTCASE_START(code1 #reg code2)		\
+	TEST_ARG_REG(reg, val)				\
+	TEST_ARG_END("")				\
+	TEST_INSTRUCTION(code1 __stringify(reg) code2)	\
+	"	b	99f		\n\t"		\
+	"	"codex"			\n\t"		\
+	TESTCASE_END
+
+#define TEST_RRX(code1, reg1, val1, code2, reg2, val2, code3, codex)		\
+	TESTCASE_START(code1 #reg1 code2 #reg2 code3)				\
+	TEST_ARG_REG(reg1, val1)						\
+	TEST_ARG_REG(reg2, val2)						\
+	TEST_ARG_END("")							\
+	TEST_INSTRUCTION(code1 __stringify(reg1) code2 __stringify(reg2) code3)	\
+	"	b	99f		\n\t"					\
+	"	"codex"			\n\t"					\
+	TESTCASE_END
+
+
+/*
+ * Macros for defining space directives spread over multiple lines.
+ * These are required so the compiler can better estimate the length of inline
+ * asm code, and so will spill the literal pool early enough to avoid
+ * generating PC relative loads with out-of-range offsets.
+ */
+#define TWICE(x)	x x
+#define SPACE_0x8	TWICE(".space 4\n\t")
+#define SPACE_0x10	TWICE(SPACE_0x8)
+#define SPACE_0x20	TWICE(SPACE_0x10)
+#define SPACE_0x40	TWICE(SPACE_0x20)
+#define SPACE_0x80	TWICE(SPACE_0x40)
+#define SPACE_0x100	TWICE(SPACE_0x80)
+#define SPACE_0x200	TWICE(SPACE_0x100)
+#define SPACE_0x400	TWICE(SPACE_0x200)
+#define SPACE_0x800	TWICE(SPACE_0x400)
+#define SPACE_0x1000	TWICE(SPACE_0x800)
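+
+/* E.g. SPACE_0x100 expands to 64 ".space 4" directives (0x100 bytes). */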
+
+
+/* Various values used in test cases... */
+#define N(val)	(val ^ 0xffffffff)
+#define VAL1	0x12345678
+#define VAL2	N(VAL1)
+#define VAL3	0xa5f801
+#define VAL4	N(VAL3)
+#define VALM	0x456789ab
+#define VALR	0xdeaddead
+#define HH1	0x0123fecb
+#define HH2	0xa9874567
+
+
+#ifdef CONFIG_THUMB2_KERNEL
+void kprobe_thumb16_test_cases(void);
+void kprobe_thumb32_test_cases(void);
+#else
+void kprobe_arm_test_cases(void);
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-thumb.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-thumb.c
new file mode 100644
index 0000000..8f96ec7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes-thumb.c
@@ -0,0 +1,1469 @@
+/*
+ * arch/arm/kernel/kprobes-thumb.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+
+#include "kprobes.h"
+
+
+/*
+ * True if current instruction is in an IT block.
+ */
+#define in_it_block(cpsr)	((cpsr & 0x06000c00) != 0x00000000)
+
+/*
+ * Return the condition code to check for the currently executing instruction.
+ * This is in ITSTATE<7:4> which is in CPSR<15:12> but is only valid if
+ * in_it_block returns true.
+ */
+#define current_cond(cpsr)	((cpsr >> 12) & 0xf)
+
+/*
+ * Return the PC value for a probe in thumb code.
+ * This is the address of the probed instruction plus 4.
+ * We subtract one because the address will have bit zero set to indicate
+ * a pointer to thumb code.
+ */
+static inline unsigned long __kprobes thumb_probe_pc(struct kprobe *p)
+{
+	return (unsigned long)p->addr - 1 + 4;
+}
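+
+/*
+ * E.g. for a probe on a Thumb instruction at 0x8000, p->addr is 0x8001
+ * (bit zero set) and thumb_probe_pc() returns 0x8004, the value the PC
+ * reads as while that instruction executes.
+ */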
+
+static void __kprobes
+t32_simulate_table_branch(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = thumb_probe_pc(p);
+	int rn = (insn >> 16) & 0xf;
+	int rm = insn & 0xf;
+
+	unsigned long rnv = (rn == 15) ? pc : regs->uregs[rn];
+	unsigned long rmv = regs->uregs[rm];
+	unsigned int halfwords;
+
+	if (insn & 0x10) /* TBH */
+		halfwords = ((u16 *)rnv)[rmv];
+	else /* TBB */
+		halfwords = ((u8 *)rnv)[rmv];
+
+	regs->ARM_pc = pc + 2 * halfwords;
+}
+
+static void __kprobes
+t32_simulate_mrs(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rd = (insn >> 8) & 0xf;
+	unsigned long mask = 0xf8ff03df; /* Mask out execution state */
+	regs->uregs[rd] = regs->ARM_cpsr & mask;
+}
+
+static void __kprobes
+t32_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = thumb_probe_pc(p);
+
+	long offset = insn & 0x7ff;		/* imm11 */
+	offset += (insn & 0x003f0000) >> 5;	/* imm6 */
+	offset += (insn & 0x00002000) << 4;	/* J1 */
+	offset += (insn & 0x00000800) << 7;	/* J2 */
+	offset -= (insn & 0x04000000) >> 7;	/* Apply sign bit */
+
+	regs->ARM_pc = pc + (offset * 2);
+}
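+
+/*
+ * The offset computed above is the signed halfword offset
+ * S:J2:J1:imm6:imm11 from the T3 encoding of B<c>, reassembled from its
+ * scattered fields.
+ */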
+
+static enum kprobe_insn __kprobes
+t32_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	int cc = (insn >> 22) & 0xf;
+	asi->insn_check_cc = kprobe_condition_checks[cc];
+	asi->insn_handler = t32_simulate_cond_branch;
+	return INSN_GOOD_NO_SLOT;
+}
+
+static void __kprobes
+t32_simulate_branch(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = thumb_probe_pc(p);
+
+	long offset = insn & 0x7ff;		/* imm11 */
+	offset += (insn & 0x03ff0000) >> 5;	/* imm10 */
+	offset += (insn & 0x00002000) << 9;	/* J1 */
+	offset += (insn & 0x00000800) << 10;	/* J2 */
+	if (insn & 0x04000000)
+		offset -= 0x00800000; /* Apply sign bit */
+	else
+		offset ^= 0x00600000; /* Invert J1 and J2 */
+
+	if (insn & (1 << 14)) {
+		/* BL or BLX */
+		regs->ARM_lr = (unsigned long)p->addr + 4;
+		if (!(insn & (1 << 12))) {
+			/* BLX so switch to ARM mode */
+			regs->ARM_cpsr &= ~PSR_T_BIT;
+			pc &= ~3;
+		}
+	}
+
+	regs->ARM_pc = pc + (offset * 2);
+}
+
+static void __kprobes
+t32_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long addr = thumb_probe_pc(p) & ~3;
+	int rt = (insn >> 12) & 0xf;
+	unsigned long rtv;
+
+	long offset = insn & 0xfff;
+	if (insn & 0x00800000)
+		addr += offset;
+	else
+		addr -= offset;
+
+	if (insn & 0x00400000) {
+		/* LDR */
+		rtv = *(unsigned long *)addr;
+		if (rt == 15) {
+			bx_write_pc(rtv, regs);
+			return;
+		}
+	} else if (insn & 0x00200000) {
+		/* LDRH */
+		if (insn & 0x01000000)
+			rtv = *(s16 *)addr;
+		else
+			rtv = *(u16 *)addr;
+	} else {
+		/* LDRB */
+		if (insn & 0x01000000)
+			rtv = *(s8 *)addr;
+		else
+			rtv = *(u8 *)addr;
+	}
+
+	regs->uregs[rt] = rtv;
+}
+
+static enum kprobe_insn __kprobes
+t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi);
+
+	/* Fixup modified instruction to have halfwords in correct order... */
+	insn = asi->insn[0];
+	((u16 *)asi->insn)[0] = insn >> 16;
+	((u16 *)asi->insn)[1] = insn & 0xffff;
+
+	return ret;
+}
+
+static void __kprobes
+t32_emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = thumb_probe_pc(p) & ~3;
+	int rt1 = (insn >> 12) & 0xf;
+	int rt2 = (insn >> 8) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+
+	register unsigned long rt1v asm("r0") = regs->uregs[rt1];
+	register unsigned long rt2v asm("r1") = regs->uregs[rt2];
+	register unsigned long rnv asm("r2") = (rn == 15) ? pc
+							  : regs->uregs[rn];
+
+	__asm__ __volatile__ (
+		"blx    %[fn]"
+		: "=r" (rt1v), "=r" (rt2v), "=r" (rnv)
+		: "0" (rt1v), "1" (rt2v), "2" (rnv), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	if (rn != 15)
+		regs->uregs[rn] = rnv; /* Writeback base register */
+	regs->uregs[rt1] = rt1v;
+	regs->uregs[rt2] = rt2v;
+}
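+
+/*
+ * The t32_emulate_* handlers below follow the same pattern as above:
+ * marshal the probed instruction's register values into the fixed
+ * registers the decoded instruction slot was rewritten to use, execute
+ * the slot copy via blx, then write the results back into pt_regs.
+ */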
+
+static void __kprobes
+t32_emulate_ldrstr(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rt = (insn >> 12) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+	int rm = insn & 0xf;
+
+	register unsigned long rtv asm("r0") = regs->uregs[rt];
+	register unsigned long rnv asm("r2") = regs->uregs[rn];
+	register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+	__asm__ __volatile__ (
+		"blx    %[fn]"
+		: "=r" (rtv), "=r" (rnv)
+		: "0" (rtv), "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rn] = rnv; /* Writeback base register */
+	if (rt == 15) /* Can't be true for a STR as they aren't allowed */
+		bx_write_pc(rtv, regs);
+	else
+		regs->uregs[rt] = rtv;
+}
+
+static void __kprobes
+t32_emulate_rd8rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rd = (insn >> 8) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+	int rm = insn & 0xf;
+
+	register unsigned long rdv asm("r1") = regs->uregs[rd];
+	register unsigned long rnv asm("r2") = regs->uregs[rn];
+	register unsigned long rmv asm("r3") = regs->uregs[rm];
+	unsigned long cpsr = regs->ARM_cpsr;
+
+	__asm__ __volatile__ (
+		"msr	cpsr_fs, %[cpsr]	\n\t"
+		"blx    %[fn]			\n\t"
+		"mrs	%[cpsr], cpsr		\n\t"
+		: "=r" (rdv), [cpsr] "=r" (cpsr)
+		: "0" (rdv), "r" (rnv), "r" (rmv),
+		  "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rd] = rdv;
+	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+t32_emulate_rd8pc16_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = thumb_probe_pc(p);
+	int rd = (insn >> 8) & 0xf;
+
+	register unsigned long rdv asm("r1") = regs->uregs[rd];
+	register unsigned long rnv asm("r2") = pc & ~3;
+
+	__asm__ __volatile__ (
+		"blx    %[fn]"
+		: "=r" (rdv)
+		: "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rd] = rdv;
+}
+
+static void __kprobes
+t32_emulate_rd8rn16_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rd = (insn >> 8) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+
+	register unsigned long rdv asm("r1") = regs->uregs[rd];
+	register unsigned long rnv asm("r2") = regs->uregs[rn];
+
+	__asm__ __volatile__ (
+		"blx    %[fn]"
+		: "=r" (rdv)
+		: "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rd] = rdv;
+}
+
+static void __kprobes
+t32_emulate_rdlo12rdhi8rn16rm0_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rdlo = (insn >> 12) & 0xf;
+	int rdhi = (insn >> 8) & 0xf;
+	int rn = (insn >> 16) & 0xf;
+	int rm = insn & 0xf;
+
+	register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
+	register unsigned long rdhiv asm("r1") = regs->uregs[rdhi];
+	register unsigned long rnv asm("r2") = regs->uregs[rn];
+	register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+	__asm__ __volatile__ (
+		"blx    %[fn]"
+		: "=r" (rdlov), "=r" (rdhiv)
+		: "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
+		  [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	regs->uregs[rdlo] = rdlov;
+	regs->uregs[rdhi] = rdhiv;
+}
+
+/* These emulation encodings are functionally equivalent... */
+#define t32_emulate_rd8rn16rm0ra12_noflags \
+		t32_emulate_rdlo12rdhi8rn16rm0_noflags
+
+static const union decode_item t32_table_1110_100x_x0xx[] = {
+	/* Load/store multiple instructions */
+
+	/* Rn is PC		1110 100x x0xx 1111 xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfe4f0000, 0xe80f0000),
+
+	/* SRS			1110 1000 00x0 xxxx xxxx xxxx xxxx xxxx */
+	/* RFE			1110 1000 00x1 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xffc00000, 0xe8000000),
+	/* SRS			1110 1001 10x0 xxxx xxxx xxxx xxxx xxxx */
+	/* RFE			1110 1001 10x1 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xffc00000, 0xe9800000),
+
+	/* STM Rn, {...pc}	1110 100x x0x0 xxxx 1xxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfe508000, 0xe8008000),
+	/* LDM Rn, {...lr,pc}	1110 100x x0x1 xxxx 11xx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfe50c000, 0xe810c000),
+	/* LDM/STM Rn, {...sp}	1110 100x x0xx xxxx xx1x xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfe402000, 0xe8002000),
+
+	/* STMIA		1110 1000 10x0 xxxx xxxx xxxx xxxx xxxx */
+	/* LDMIA		1110 1000 10x1 xxxx xxxx xxxx xxxx xxxx */
+	/* STMDB		1110 1001 00x0 xxxx xxxx xxxx xxxx xxxx */
+	/* LDMDB		1110 1001 00x1 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_CUSTOM	(0xfe400000, 0xe8000000, t32_decode_ldmstm),
+
+	DECODE_END
+};
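+
+/*
+ * Note on the decode tables here and below: entries are matched top to
+ * bottom against (insn & mask) == value and the first match wins, so
+ * DECODE_REJECT entries must appear before the broader patterns they
+ * carve exceptions out of.
+ */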
+
+static const union decode_item t32_table_1110_100x_x1xx[] = {
+	/* Load/store dual, load/store exclusive, table branch */
+
+	/* STRD (immediate)	1110 1000 x110 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRD (immediate)	1110 1000 x111 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_OR	(0xff600000, 0xe8600000),
+	/* STRD (immediate)	1110 1001 x1x0 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRD (immediate)	1110 1001 x1x1 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xff400000, 0xe9400000, t32_emulate_ldrdstrd,
+						 REGS(NOPCWB, NOSPPC, NOSPPC, 0, 0)),
+
+	/* TBB			1110 1000 1101 xxxx xxxx xxxx 0000 xxxx */
+	/* TBH			1110 1000 1101 xxxx xxxx xxxx 0001 xxxx */
+	DECODE_SIMULATEX(0xfff000e0, 0xe8d00000, t32_simulate_table_branch,
+						 REGS(NOSP, 0, 0, 0, NOSPPC)),
+
+	/* STREX		1110 1000 0100 xxxx xxxx xxxx xxxx xxxx */
+	/* LDREX		1110 1000 0101 xxxx xxxx xxxx xxxx xxxx */
+	/* STREXB		1110 1000 1100 xxxx xxxx xxxx 0100 xxxx */
+	/* STREXH		1110 1000 1100 xxxx xxxx xxxx 0101 xxxx */
+	/* STREXD		1110 1000 1100 xxxx xxxx xxxx 0111 xxxx */
+	/* LDREXB		1110 1000 1101 xxxx xxxx xxxx 0100 xxxx */
+	/* LDREXH		1110 1000 1101 xxxx xxxx xxxx 0101 xxxx */
+	/* LDREXD		1110 1000 1101 xxxx xxxx xxxx 0111 xxxx */
+	/* And unallocated instructions...				*/
+	DECODE_END
+};
+
+static const union decode_item t32_table_1110_101x[] = {
+	/* Data-processing (shifted register)				*/
+
+	/* TST			1110 1010 0001 xxxx xxxx 1111 xxxx xxxx */
+	/* TEQ			1110 1010 1001 xxxx xxxx 1111 xxxx xxxx */
+	DECODE_EMULATEX	(0xff700f00, 0xea100f00, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(NOSPPC, 0, 0, 0, NOSPPC)),
+
+	/* CMN			1110 1011 0001 xxxx xxxx 1111 xxxx xxxx */
+	DECODE_OR	(0xfff00f00, 0xeb100f00),
+	/* CMP			1110 1011 1011 xxxx xxxx 1111 xxxx xxxx */
+	DECODE_EMULATEX	(0xfff00f00, 0xebb00f00, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(NOPC, 0, 0, 0, NOSPPC)),
+
+	/* MOV			1110 1010 010x 1111 xxxx xxxx xxxx xxxx */
+	/* MVN			1110 1010 011x 1111 xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xffcf0000, 0xea4f0000, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(0, 0, NOSPPC, 0, NOSPPC)),
+
+	/* ???			1110 1010 101x xxxx xxxx xxxx xxxx xxxx */
+	/* ???			1110 1010 111x xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xffa00000, 0xeaa00000),
+	/* ???			1110 1011 001x xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xffe00000, 0xeb200000),
+	/* ???			1110 1011 100x xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xffe00000, 0xeb800000),
+	/* ???			1110 1011 111x xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xffe00000, 0xebe00000),
+
+	/* ADD/SUB SP, SP, Rm, LSL #0..3				*/
+	/*			1110 1011 x0xx 1101 x000 1101 xx00 xxxx */
+	DECODE_EMULATEX	(0xff4f7f30, 0xeb0d0d00, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(SP, 0, SP, 0, NOSPPC)),
+
+	/* ADD/SUB SP, SP, Rm, shift					*/
+	/*			1110 1011 x0xx 1101 xxxx 1101 xxxx xxxx */
+	DECODE_REJECT	(0xff4f0f00, 0xeb0d0d00),
+
+	/* ADD/SUB Rd, SP, Rm, shift					*/
+	/*			1110 1011 x0xx 1101 xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xff4f0000, 0xeb0d0000, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(SP, 0, NOPC, 0, NOSPPC)),
+
+	/* AND			1110 1010 000x xxxx xxxx xxxx xxxx xxxx */
+	/* BIC			1110 1010 001x xxxx xxxx xxxx xxxx xxxx */
+	/* ORR			1110 1010 010x xxxx xxxx xxxx xxxx xxxx */
+	/* ORN			1110 1010 011x xxxx xxxx xxxx xxxx xxxx */
+	/* EOR			1110 1010 100x xxxx xxxx xxxx xxxx xxxx */
+	/* PKH			1110 1010 110x xxxx xxxx xxxx xxxx xxxx */
+	/* ADD			1110 1011 000x xxxx xxxx xxxx xxxx xxxx */
+	/* ADC			1110 1011 010x xxxx xxxx xxxx xxxx xxxx */
+	/* SBC			1110 1011 011x xxxx xxxx xxxx xxxx xxxx */
+	/* SUB			1110 1011 101x xxxx xxxx xxxx xxxx xxxx */
+	/* RSB			1110 1011 110x xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfe000000, 0xea000000, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
+
+	DECODE_END
+};
+
+static const union decode_item t32_table_1111_0x0x___0[] = {
+	/* Data-processing (modified immediate)				*/
+
+	/* TST			1111 0x00 0001 xxxx 0xxx 1111 xxxx xxxx */
+	/* TEQ			1111 0x00 1001 xxxx 0xxx 1111 xxxx xxxx */
+	DECODE_EMULATEX	(0xfb708f00, 0xf0100f00, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(NOSPPC, 0, 0, 0, 0)),
+
+	/* CMN			1111 0x01 0001 xxxx 0xxx 1111 xxxx xxxx */
+	DECODE_OR	(0xfbf08f00, 0xf1100f00),
+	/* CMP			1111 0x01 1011 xxxx 0xxx 1111 xxxx xxxx */
+	DECODE_EMULATEX	(0xfbf08f00, 0xf1b00f00, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(NOPC, 0, 0, 0, 0)),
+
+	/* MOV			1111 0x00 010x 1111 0xxx xxxx xxxx xxxx */
+	/* MVN			1111 0x00 011x 1111 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfbcf8000, 0xf04f0000, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(0, 0, NOSPPC, 0, 0)),
+
+	/* ???			1111 0x00 101x xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfbe08000, 0xf0a00000),
+	/* ???			1111 0x00 110x xxxx 0xxx xxxx xxxx xxxx */
+	/* ???			1111 0x00 111x xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfbc08000, 0xf0c00000),
+	/* ???			1111 0x01 001x xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfbe08000, 0xf1200000),
+	/* ???			1111 0x01 100x xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfbe08000, 0xf1800000),
+	/* ???			1111 0x01 111x xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfbe08000, 0xf1e00000),
+
+	/* ADD Rd, SP, #imm	1111 0x01 000x 1101 0xxx xxxx xxxx xxxx */
+	/* SUB Rd, SP, #imm	1111 0x01 101x 1101 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfb4f8000, 0xf10d0000, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(SP, 0, NOPC, 0, 0)),
+
+	/* AND			1111 0x00 000x xxxx 0xxx xxxx xxxx xxxx */
+	/* BIC			1111 0x00 001x xxxx 0xxx xxxx xxxx xxxx */
+	/* ORR			1111 0x00 010x xxxx 0xxx xxxx xxxx xxxx */
+	/* ORN			1111 0x00 011x xxxx 0xxx xxxx xxxx xxxx */
+	/* EOR			1111 0x00 100x xxxx 0xxx xxxx xxxx xxxx */
+	/* ADD			1111 0x01 000x xxxx 0xxx xxxx xxxx xxxx */
+	/* ADC			1111 0x01 010x xxxx 0xxx xxxx xxxx xxxx */
+	/* SBC			1111 0x01 011x xxxx 0xxx xxxx xxxx xxxx */
+	/* SUB			1111 0x01 101x xxxx 0xxx xxxx xxxx xxxx */
+	/* RSB			1111 0x01 110x xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfa008000, 0xf0000000, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(NOSPPC, 0, NOSPPC, 0, 0)),
+
+	DECODE_END
+};
+
+static const union decode_item t32_table_1111_0x1x___0[] = {
+	/* Data-processing (plain binary immediate)			*/
+
+	/* ADDW Rd, PC, #imm	1111 0x10 0000 1111 0xxx xxxx xxxx xxxx */
+	DECODE_OR	(0xfbff8000, 0xf20f0000),
+	/* SUBW	Rd, PC, #imm	1111 0x10 1010 1111 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfbff8000, 0xf2af0000, t32_emulate_rd8pc16_noflags,
+						 REGS(PC, 0, NOSPPC, 0, 0)),
+
+	/* ADDW SP, SP, #imm	1111 0x10 0000 1101 0xxx 1101 xxxx xxxx */
+	DECODE_OR	(0xfbff8f00, 0xf20d0d00),
+	/* SUBW	SP, SP, #imm	1111 0x10 1010 1101 0xxx 1101 xxxx xxxx */
+	DECODE_EMULATEX	(0xfbff8f00, 0xf2ad0d00, t32_emulate_rd8rn16_noflags,
+						 REGS(SP, 0, SP, 0, 0)),
+
+	/* ADDW			1111 0x10 0000 xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_OR	(0xfbf08000, 0xf2000000),
+	/* SUBW			1111 0x10 1010 xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfbf08000, 0xf2a00000, t32_emulate_rd8rn16_noflags,
+						 REGS(NOPCX, 0, NOSPPC, 0, 0)),
+
+	/* MOVW			1111 0x10 0100 xxxx 0xxx xxxx xxxx xxxx */
+	/* MOVT			1111 0x10 1100 xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfb708000, 0xf2400000, t32_emulate_rd8rn16_noflags,
+						 REGS(0, 0, NOSPPC, 0, 0)),
+
+	/* SSAT16		1111 0x11 0010 xxxx 0000 xxxx 00xx xxxx */
+	/* SSAT			1111 0x11 00x0 xxxx 0xxx xxxx xxxx xxxx */
+	/* USAT16		1111 0x11 1010 xxxx 0000 xxxx 00xx xxxx */
+	/* USAT			1111 0x11 10x0 xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfb508000, 0xf3000000, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(NOSPPC, 0, NOSPPC, 0, 0)),
+
+	/* SBFX			1111 0x11 0100 xxxx 0xxx xxxx xxxx xxxx */
+	/* UBFX			1111 0x11 1100 xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfb708000, 0xf3400000, t32_emulate_rd8rn16_noflags,
+						 REGS(NOSPPC, 0, NOSPPC, 0, 0)),
+
+	/* BFC			1111 0x11 0110 1111 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfbff8000, 0xf36f0000, t32_emulate_rd8rn16_noflags,
+						 REGS(0, 0, NOSPPC, 0, 0)),
+
+	/* BFI			1111 0x11 0110 xxxx 0xxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfbf08000, 0xf3600000, t32_emulate_rd8rn16_noflags,
+						 REGS(NOSPPCX, 0, NOSPPC, 0, 0)),
+
+	DECODE_END
+};
+
+static const union decode_item t32_table_1111_0xxx___1[] = {
+	/* Branches and miscellaneous control				*/
+
+	/* YIELD		1111 0011 1010 xxxx 10x0 x000 0000 0001 */
+	DECODE_OR	(0xfff0d7ff, 0xf3a08001),
+	/* SEV			1111 0011 1010 xxxx 10x0 x000 0000 0100 */
+	DECODE_EMULATE	(0xfff0d7ff, 0xf3a08004, kprobe_emulate_none),
+	/* NOP			1111 0011 1010 xxxx 10x0 x000 0000 0000 */
+	/* WFE			1111 0011 1010 xxxx 10x0 x000 0000 0010 */
+	/* WFI			1111 0011 1010 xxxx 10x0 x000 0000 0011 */
+	DECODE_SIMULATE	(0xfff0d7fc, 0xf3a08000, kprobe_simulate_nop),
+
+	/* MRS Rd, CPSR		1111 0011 1110 xxxx 10x0 xxxx xxxx xxxx */
+	DECODE_SIMULATEX(0xfff0d000, 0xf3e08000, t32_simulate_mrs,
+						 REGS(0, 0, NOSPPC, 0, 0)),
+
+	/*
+	 * Unsupported instructions
+	 *			1111 0x11 1xxx xxxx 10x0 xxxx xxxx xxxx
+	 *
+	 * MSR			1111 0011 100x xxxx 10x0 xxxx xxxx xxxx
+	 * DBG hint		1111 0011 1010 xxxx 10x0 x000 1111 xxxx
+	 * Unallocated hints	1111 0011 1010 xxxx 10x0 x000 xxxx xxxx
+	 * CPS			1111 0011 1010 xxxx 10x0 xxxx xxxx xxxx
+	 * CLREX/DSB/DMB/ISB	1111 0011 1011 xxxx 10x0 xxxx xxxx xxxx
+	 * BXJ			1111 0011 1100 xxxx 10x0 xxxx xxxx xxxx
+	 * SUBS PC,LR,#<imm8>	1111 0011 1101 xxxx 10x0 xxxx xxxx xxxx
+	 * MRS Rd, SPSR		1111 0011 1111 xxxx 10x0 xxxx xxxx xxxx
+	 * SMC			1111 0111 1111 xxxx 1000 xxxx xxxx xxxx
+	 * UNDEFINED		1111 0111 1111 xxxx 1010 xxxx xxxx xxxx
+	 * ???			1111 0111 1xxx xxxx 1010 xxxx xxxx xxxx
+	 */
+	DECODE_REJECT	(0xfb80d000, 0xf3808000),
+
+	/* Bcc			1111 0xxx xxxx xxxx 10x0 xxxx xxxx xxxx */
+	DECODE_CUSTOM	(0xf800d000, 0xf0008000, t32_decode_cond_branch),
+
+	/* BLX			1111 0xxx xxxx xxxx 11x0 xxxx xxxx xxx0 */
+	DECODE_OR	(0xf800d001, 0xf000c000),
+	/* B			1111 0xxx xxxx xxxx 10x1 xxxx xxxx xxxx */
+	/* BL			1111 0xxx xxxx xxxx 11x1 xxxx xxxx xxxx */
+	DECODE_SIMULATE	(0xf8009000, 0xf0009000, t32_simulate_branch),
+
+	DECODE_END
+};
+
+static const union decode_item t32_table_1111_100x_x0x1__1111[] = {
+	/* Memory hints							*/
+
+	/* PLD (literal)	1111 1000 x001 1111 1111 xxxx xxxx xxxx */
+	/* PLI (literal)	1111 1001 x001 1111 1111 xxxx xxxx xxxx */
+	DECODE_SIMULATE	(0xfe7ff000, 0xf81ff000, kprobe_simulate_nop),
+
+	/* PLD{W} (immediate)	1111 1000 10x1 xxxx 1111 xxxx xxxx xxxx */
+	DECODE_OR	(0xffd0f000, 0xf890f000),
+	/* PLD{W} (immediate)	1111 1000 00x1 xxxx 1111 1100 xxxx xxxx */
+	DECODE_OR	(0xffd0ff00, 0xf810fc00),
+	/* PLI (immediate)	1111 1001 1001 xxxx 1111 xxxx xxxx xxxx */
+	DECODE_OR	(0xfff0f000, 0xf990f000),
+	/* PLI (immediate)	1111 1001 0001 xxxx 1111 1100 xxxx xxxx */
+	DECODE_SIMULATEX(0xfff0ff00, 0xf910fc00, kprobe_simulate_nop,
+						 REGS(NOPCX, 0, 0, 0, 0)),
+
+	/* PLD{W} (register)	1111 1000 00x1 xxxx 1111 0000 00xx xxxx */
+	DECODE_OR	(0xffd0ffc0, 0xf810f000),
+	/* PLI (register)	1111 1001 0001 xxxx 1111 0000 00xx xxxx */
+	DECODE_SIMULATEX(0xfff0ffc0, 0xf910f000, kprobe_simulate_nop,
+						 REGS(NOPCX, 0, 0, 0, NOSPPC)),
+
+	/* Other unallocated instructions...				*/
+	DECODE_END
+};
+
+static const union decode_item t32_table_1111_100x[] = {
+	/* Store/Load single data item					*/
+
+	/* ???			1111 100x x11x xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfe600000, 0xf8600000),
+
+	/* ???			1111 1001 0101 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xfff00000, 0xf9500000),
+
+	/* ???			1111 100x 0xxx xxxx xxxx 10x0 xxxx xxxx */
+	DECODE_REJECT	(0xfe800d00, 0xf8000800),
+
+	/* STRBT		1111 1000 0000 xxxx xxxx 1110 xxxx xxxx */
+	/* STRHT		1111 1000 0010 xxxx xxxx 1110 xxxx xxxx */
+	/* STRT			1111 1000 0100 xxxx xxxx 1110 xxxx xxxx */
+	/* LDRBT		1111 1000 0001 xxxx xxxx 1110 xxxx xxxx */
+	/* LDRSBT		1111 1001 0001 xxxx xxxx 1110 xxxx xxxx */
+	/* LDRHT		1111 1000 0011 xxxx xxxx 1110 xxxx xxxx */
+	/* LDRSHT		1111 1001 0011 xxxx xxxx 1110 xxxx xxxx */
+	/* LDRT			1111 1000 0101 xxxx xxxx 1110 xxxx xxxx */
+	DECODE_REJECT	(0xfe800f00, 0xf8000e00),
+
+	/* STR{,B,H} Rn,[PC...]	1111 1000 xxx0 1111 xxxx xxxx xxxx xxxx */
+	DECODE_REJECT	(0xff1f0000, 0xf80f0000),
+
+	/* STR{,B,H} PC,[Rn...]	1111 1000 xxx0 xxxx 1111 xxxx xxxx xxxx */
+	DECODE_REJECT	(0xff10f000, 0xf800f000),
+
+	/* LDR (literal)	1111 1000 x101 1111 xxxx xxxx xxxx xxxx */
+	DECODE_SIMULATEX(0xff7f0000, 0xf85f0000, t32_simulate_ldr_literal,
+						 REGS(PC, ANY, 0, 0, 0)),
+
+	/* STR (immediate)	1111 1000 0100 xxxx xxxx 1xxx xxxx xxxx */
+	/* LDR (immediate)	1111 1000 0101 xxxx xxxx 1xxx xxxx xxxx */
+	DECODE_OR	(0xffe00800, 0xf8400800),
+	/* STR (immediate)	1111 1000 1100 xxxx xxxx xxxx xxxx xxxx */
+	/* LDR (immediate)	1111 1000 1101 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xffe00000, 0xf8c00000, t32_emulate_ldrstr,
+						 REGS(NOPCX, ANY, 0, 0, 0)),
+
+	/* STR (register)	1111 1000 0100 xxxx xxxx 0000 00xx xxxx */
+	/* LDR (register)	1111 1000 0101 xxxx xxxx 0000 00xx xxxx */
+	DECODE_EMULATEX	(0xffe00fc0, 0xf8400000, t32_emulate_ldrstr,
+						 REGS(NOPCX, ANY, 0, 0, NOSPPC)),
+
+	/* LDRB (literal)	1111 1000 x001 1111 xxxx xxxx xxxx xxxx */
+	/* LDRSB (literal)	1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
+	/* LDRH (literal)	1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
+	/* LDRSH (literal)	1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
+						 REGS(PC, NOSPPCX, 0, 0, 0)),
+
+	/* STRB (immediate)	1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
+	/* STRH (immediate)	1111 1000 0010 xxxx xxxx 1xxx xxxx xxxx */
+	/* LDRB (immediate)	1111 1000 0001 xxxx xxxx 1xxx xxxx xxxx */
+	/* LDRSB (immediate)	1111 1001 0001 xxxx xxxx 1xxx xxxx xxxx */
+	/* LDRH (immediate)	1111 1000 0011 xxxx xxxx 1xxx xxxx xxxx */
+	/* LDRSH (immediate)	1111 1001 0011 xxxx xxxx 1xxx xxxx xxxx */
+	DECODE_OR	(0xfec00800, 0xf8000800),
+	/* STRB (immediate)	1111 1000 1000 xxxx xxxx xxxx xxxx xxxx */
+	/* STRH (immediate)	1111 1000 1010 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRB (immediate)	1111 1000 1001 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRSB (immediate)	1111 1001 1001 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRH (immediate)	1111 1000 1011 xxxx xxxx xxxx xxxx xxxx */
+	/* LDRSH (immediate)	1111 1001 1011 xxxx xxxx xxxx xxxx xxxx */
+	DECODE_EMULATEX	(0xfec00000, 0xf8800000, t32_emulate_ldrstr,
+						 REGS(NOPCX, NOSPPCX, 0, 0, 0)),
+
+	/* STRB (register)	1111 1000 0000 xxxx xxxx 0000 00xx xxxx */
+	/* STRH (register)	1111 1000 0010 xxxx xxxx 0000 00xx xxxx */
+	/* LDRB (register)	1111 1000 0001 xxxx xxxx 0000 00xx xxxx */
+	/* LDRSB (register)	1111 1001 0001 xxxx xxxx 0000 00xx xxxx */
+	/* LDRH (register)	1111 1000 0011 xxxx xxxx 0000 00xx xxxx */
+	/* LDRSH (register)	1111 1001 0011 xxxx xxxx 0000 00xx xxxx */
+	DECODE_EMULATEX	(0xfe800fc0, 0xf8000000, t32_emulate_ldrstr,
+						 REGS(NOPCX, NOSPPCX, 0, 0, NOSPPC)),
+
+	/* Other unallocated instructions...				*/
+	DECODE_END
+};
+
+static const union decode_item t32_table_1111_1010___1111[] = {
+	/* Data-processing (register)					*/
+
+	/* ???			1111 1010 011x xxxx 1111 xxxx 1xxx xxxx */
+	DECODE_REJECT	(0xffe0f080, 0xfa60f080),
+
+	/* SXTH			1111 1010 0000 1111 1111 xxxx 1xxx xxxx */
+	/* UXTH			1111 1010 0001 1111 1111 xxxx 1xxx xxxx */
+	/* SXTB16		1111 1010 0010 1111 1111 xxxx 1xxx xxxx */
+	/* UXTB16		1111 1010 0011 1111 1111 xxxx 1xxx xxxx */
+	/* SXTB			1111 1010 0100 1111 1111 xxxx 1xxx xxxx */
+	/* UXTB			1111 1010 0101 1111 1111 xxxx 1xxx xxxx */
+	DECODE_EMULATEX	(0xff8ff080, 0xfa0ff080, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(0, 0, NOSPPC, 0, NOSPPC)),
+
+
+	/* ???			1111 1010 1xxx xxxx 1111 xxxx 0x11 xxxx */
+	DECODE_REJECT	(0xff80f0b0, 0xfa80f030),
+	/* ???			1111 1010 1x11 xxxx 1111 xxxx 0xxx xxxx */
+	DECODE_REJECT	(0xffb0f080, 0xfab0f000),
+
+	/* SADD16		1111 1010 1001 xxxx 1111 xxxx 0000 xxxx */
+	/* SASX			1111 1010 1010 xxxx 1111 xxxx 0000 xxxx */
+	/* SSAX			1111 1010 1110 xxxx 1111 xxxx 0000 xxxx */
+	/* SSUB16		1111 1010 1101 xxxx 1111 xxxx 0000 xxxx */
+	/* SADD8		1111 1010 1000 xxxx 1111 xxxx 0000 xxxx */
+	/* SSUB8		1111 1010 1100 xxxx 1111 xxxx 0000 xxxx */
+
+	/* QADD16		1111 1010 1001 xxxx 1111 xxxx 0001 xxxx */
+	/* QASX			1111 1010 1010 xxxx 1111 xxxx 0001 xxxx */
+	/* QSAX			1111 1010 1110 xxxx 1111 xxxx 0001 xxxx */
+	/* QSUB16		1111 1010 1101 xxxx 1111 xxxx 0001 xxxx */
+	/* QADD8		1111 1010 1000 xxxx 1111 xxxx 0001 xxxx */
+	/* QSUB8		1111 1010 1100 xxxx 1111 xxxx 0001 xxxx */
+
+	/* SHADD16		1111 1010 1001 xxxx 1111 xxxx 0010 xxxx */
+	/* SHASX		1111 1010 1010 xxxx 1111 xxxx 0010 xxxx */
+	/* SHSAX		1111 1010 1110 xxxx 1111 xxxx 0010 xxxx */
+	/* SHSUB16		1111 1010 1101 xxxx 1111 xxxx 0010 xxxx */
+	/* SHADD8		1111 1010 1000 xxxx 1111 xxxx 0010 xxxx */
+	/* SHSUB8		1111 1010 1100 xxxx 1111 xxxx 0010 xxxx */
+
+	/* UADD16		1111 1010 1001 xxxx 1111 xxxx 0100 xxxx */
+	/* UASX			1111 1010 1010 xxxx 1111 xxxx 0100 xxxx */
+	/* USAX			1111 1010 1110 xxxx 1111 xxxx 0100 xxxx */
+	/* USUB16		1111 1010 1101 xxxx 1111 xxxx 0100 xxxx */
+	/* UADD8		1111 1010 1000 xxxx 1111 xxxx 0100 xxxx */
+	/* USUB8		1111 1010 1100 xxxx 1111 xxxx 0100 xxxx */
+
+	/* UQADD16		1111 1010 1001 xxxx 1111 xxxx 0101 xxxx */
+	/* UQASX		1111 1010 1010 xxxx 1111 xxxx 0101 xxxx */
+	/* UQSAX		1111 1010 1110 xxxx 1111 xxxx 0101 xxxx */
+	/* UQSUB16		1111 1010 1101 xxxx 1111 xxxx 0101 xxxx */
+	/* UQADD8		1111 1010 1000 xxxx 1111 xxxx 0101 xxxx */
+	/* UQSUB8		1111 1010 1100 xxxx 1111 xxxx 0101 xxxx */
+
+	/* UHADD16		1111 1010 1001 xxxx 1111 xxxx 0110 xxxx */
+	/* UHASX		1111 1010 1010 xxxx 1111 xxxx 0110 xxxx */
+	/* UHSAX		1111 1010 1110 xxxx 1111 xxxx 0110 xxxx */
+	/* UHSUB16		1111 1010 1101 xxxx 1111 xxxx 0110 xxxx */
+	/* UHADD8		1111 1010 1000 xxxx 1111 xxxx 0110 xxxx */
+	/* UHSUB8		1111 1010 1100 xxxx 1111 xxxx 0110 xxxx */
+	DECODE_OR	(0xff80f080, 0xfa80f000),
+
+	/* SXTAH		1111 1010 0000 xxxx 1111 xxxx 1xxx xxxx */
+	/* UXTAH		1111 1010 0001 xxxx 1111 xxxx 1xxx xxxx */
+	/* SXTAB16		1111 1010 0010 xxxx 1111 xxxx 1xxx xxxx */
+	/* UXTAB16		1111 1010 0011 xxxx 1111 xxxx 1xxx xxxx */
+	/* SXTAB		1111 1010 0100 xxxx 1111 xxxx 1xxx xxxx */
+	/* UXTAB		1111 1010 0101 xxxx 1111 xxxx 1xxx xxxx */
+	DECODE_OR	(0xff80f080, 0xfa00f080),
+
+	/* QADD			1111 1010 1000 xxxx 1111 xxxx 1000 xxxx */
+	/* QDADD		1111 1010 1000 xxxx 1111 xxxx 1001 xxxx */
+	/* QSUB			1111 1010 1000 xxxx 1111 xxxx 1010 xxxx */
+	/* QDSUB		1111 1010 1000 xxxx 1111 xxxx 1011 xxxx */
+	DECODE_OR	(0xfff0f0c0, 0xfa80f080),
+
+	/* SEL			1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */
+	DECODE_OR	(0xfff0f0f0, 0xfaa0f080),
+
+	/* LSL			1111 1010 000x xxxx 1111 xxxx 0000 xxxx */
+	/* LSR			1111 1010 001x xxxx 1111 xxxx 0000 xxxx */
+	/* ASR			1111 1010 010x xxxx 1111 xxxx 0000 xxxx */
+	/* ROR			1111 1010 011x xxxx 1111 xxxx 0000 xxxx */
+	DECODE_EMULATEX	(0xff80f0f0, 0xfa00f000, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
+
+	/* CLZ			1111 1010 1011 xxxx 1111 xxxx 1000 xxxx */
+	DECODE_OR	(0xfff0f0f0, 0xfab0f080),
+
+	/* REV			1111 1010 1001 xxxx 1111 xxxx 1000 xxxx */
+	/* REV16		1111 1010 1001 xxxx 1111 xxxx 1001 xxxx */
+	/* RBIT			1111 1010 1001 xxxx 1111 xxxx 1010 xxxx */
+	/* REVSH		1111 1010 1001 xxxx 1111 xxxx 1011 xxxx */
+	DECODE_EMULATEX	(0xfff0f0c0, 0xfa90f080, t32_emulate_rd8rn16_noflags,
+						 REGS(NOSPPC, 0, NOSPPC, 0, SAMEAS16)),
+
+	/* Other unallocated instructions...				*/
+	DECODE_END
+};
+
+static const union decode_item t32_table_1111_1011_0[] = {
+	/* Multiply, multiply accumulate, and absolute difference	*/
+
+	/* ???			1111 1011 0000 xxxx 1111 xxxx 0001 xxxx */
+	DECODE_REJECT	(0xfff0f0f0, 0xfb00f010),
+	/* ???			1111 1011 0111 xxxx 1111 xxxx 0001 xxxx */
+	DECODE_REJECT	(0xfff0f0f0, 0xfb70f010),
+
+	/* SMULxy		1111 1011 0001 xxxx 1111 xxxx 00xx xxxx */
+	DECODE_OR	(0xfff0f0c0, 0xfb10f000),
+	/* MUL			1111 1011 0000 xxxx 1111 xxxx 0000 xxxx */
+	/* SMUAD{X}		1111 1011 0010 xxxx 1111 xxxx 000x xxxx */
+	/* SMULWy		1111 1011 0011 xxxx 1111 xxxx 000x xxxx */
+	/* SMUSD{X}		1111 1011 0100 xxxx 1111 xxxx 000x xxxx */
+	/* SMMUL{R}		1111 1011 0101 xxxx 1111 xxxx 000x xxxx */
+	/* USAD8		1111 1011 0111 xxxx 1111 xxxx 0000 xxxx */
+	DECODE_EMULATEX	(0xff80f0e0, 0xfb00f000, t32_emulate_rd8rn16rm0_rwflags,
+						 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
+
+	/* ???			1111 1011 0111 xxxx xxxx xxxx 0001 xxxx */
+	DECODE_REJECT	(0xfff000f0, 0xfb700010),
+
+	/* SMLAxy		1111 1011 0001 xxxx xxxx xxxx 00xx xxxx */
+	DECODE_OR	(0xfff000c0, 0xfb100000),
+	/* MLA			1111 1011 0000 xxxx xxxx xxxx 0000 xxxx */
+	/* MLS			1111 1011 0000 xxxx xxxx xxxx 0001 xxxx */
+	/* SMLAD{X}		1111 1011 0010 xxxx xxxx xxxx 000x xxxx */
+	/* SMLAWy		1111 1011 0011 xxxx xxxx xxxx 000x xxxx */
+	/* SMLSD{X}		1111 1011 0100 xxxx xxxx xxxx 000x xxxx */
+	/* SMMLA{R}		1111 1011 0101 xxxx xxxx xxxx 000x xxxx */
+	/* SMMLS{R}		1111 1011 0110 xxxx xxxx xxxx 000x xxxx */
+	/* USADA8		1111 1011 0111 xxxx xxxx xxxx 0000 xxxx */
+	DECODE_EMULATEX	(0xff8000c0, 0xfb000000, t32_emulate_rd8rn16rm0ra12_noflags,
+						 REGS(NOSPPC, NOSPPCX, NOSPPC, 0, NOSPPC)),
+
+	/* Other unallocated instructions...				*/
+	DECODE_END
+};
+
+static const union decode_item t32_table_1111_1011_1[] = {
+	/* Long multiply, long multiply accumulate, and divide		*/
+
+	/* UMAAL		1111 1011 1110 xxxx xxxx xxxx 0110 xxxx */
+	DECODE_OR	(0xfff000f0, 0xfbe00060),
+	/* SMLALxy		1111 1011 1100 xxxx xxxx xxxx 10xx xxxx */
+	DECODE_OR	(0xfff000c0, 0xfbc00080),
+	/* SMLALD{X}		1111 1011 1100 xxxx xxxx xxxx 110x xxxx */
+	/* SMLSLD{X}		1111 1011 1101 xxxx xxxx xxxx 110x xxxx */
+	DECODE_OR	(0xffe000e0, 0xfbc000c0),
+	/* SMULL		1111 1011 1000 xxxx xxxx xxxx 0000 xxxx */
+	/* UMULL		1111 1011 1010 xxxx xxxx xxxx 0000 xxxx */
+	/* SMLAL		1111 1011 1100 xxxx xxxx xxxx 0000 xxxx */
+	/* UMLAL		1111 1011 1110 xxxx xxxx xxxx 0000 xxxx */
+	DECODE_EMULATEX	(0xff9000f0, 0xfb800000, t32_emulate_rdlo12rdhi8rn16rm0_noflags,
+						 REGS(NOSPPC, NOSPPC, NOSPPC, 0, NOSPPC)),
+
+	/* SDIV			1111 1011 1001 xxxx xxxx xxxx 1111 xxxx */
+	/* UDIV			1111 1011 1011 xxxx xxxx xxxx 1111 xxxx */
+	/* Other unallocated instructions...				*/
+	DECODE_END
+};
+
+const union decode_item kprobe_decode_thumb32_table[] = {
+
+	/*
+	 * Load/store multiple instructions
+	 *			1110 100x x0xx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xfe400000, 0xe8000000, t32_table_1110_100x_x0xx),
+
+	/*
+	 * Load/store dual, load/store exclusive, table branch
+	 *			1110 100x x1xx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xfe400000, 0xe8400000, t32_table_1110_100x_x1xx),
+
+	/*
+	 * Data-processing (shifted register)
+	 *			1110 101x xxxx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xfe000000, 0xea000000, t32_table_1110_101x),
+
+	/*
+	 * Coprocessor instructions
+	 *			1110 11xx xxxx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_REJECT	(0xfc000000, 0xec000000),
+
+	/*
+	 * Data-processing (modified immediate)
+	 *			1111 0x0x xxxx xxxx 0xxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xfa008000, 0xf0000000, t32_table_1111_0x0x___0),
+
+	/*
+	 * Data-processing (plain binary immediate)
+	 *			1111 0x1x xxxx xxxx 0xxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xfa008000, 0xf2000000, t32_table_1111_0x1x___0),
+
+	/*
+	 * Branches and miscellaneous control
+	 *			1111 0xxx xxxx xxxx 1xxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xf8008000, 0xf0008000, t32_table_1111_0xxx___1),
+
+	/*
+	 * Advanced SIMD element or structure load/store instructions
+	 *			1111 1001 xxx0 xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_REJECT	(0xff100000, 0xf9000000),
+
+	/*
+	 * Memory hints
+	 *			1111 100x x0x1 xxxx 1111 xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xfe50f000, 0xf810f000, t32_table_1111_100x_x0x1__1111),
+
+	/*
+	 * Store single data item
+	 *			1111 1000 xxx0 xxxx xxxx xxxx xxxx xxxx
+	 * Load single data items
+	 *			1111 100x xxx1 xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xfe000000, 0xf8000000, t32_table_1111_100x),
+
+	/*
+	 * Data-processing (register)
+	 *			1111 1010 xxxx xxxx 1111 xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xff00f000, 0xfa00f000, t32_table_1111_1010___1111),
+
+	/*
+	 * Multiply, multiply accumulate, and absolute difference
+	 *			1111 1011 0xxx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xff800000, 0xfb000000, t32_table_1111_1011_0),
+
+	/*
+	 * Long multiply, long multiply accumulate, and divide
+	 *			1111 1011 1xxx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xff800000, 0xfb800000, t32_table_1111_1011_1),
+
+	/*
+	 * Coprocessor instructions
+	 *			1111 11xx xxxx xxxx xxxx xxxx xxxx xxxx
+	 */
+	DECODE_END
+};
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_thumb32_table);
+#endif
+
+static void __kprobes
+t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = thumb_probe_pc(p);
+	int rm = (insn >> 3) & 0xf;
+	unsigned long rmv = (rm == 15) ? pc : regs->uregs[rm];
+
+	if (insn & (1 << 7)) /* BLX ? */
+		regs->ARM_lr = (unsigned long)p->addr + 2;
+
+	bx_write_pc(rmv, regs);
+}
+
+static void __kprobes
+t16_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long *base = (unsigned long *)(thumb_probe_pc(p) & ~3);
+	long index = insn & 0xff;
+	int rt = (insn >> 8) & 0x7;
+	regs->uregs[rt] = base[index];
+}
+
+static void __kprobes
+t16_simulate_ldrstr_sp_relative(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long *base = (unsigned long *)regs->ARM_sp;
+	long index = insn & 0xff;
+	int rt = (insn >> 8) & 0x7;
+	if (insn & 0x800) /* LDR */
+		regs->uregs[rt] = base[index];
+	else /* STR */
+		base[index] = regs->uregs[rt];
+}
+
+static void __kprobes
+t16_simulate_reladr(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long base = (insn & 0x800) ? regs->ARM_sp
+					    : (thumb_probe_pc(p) & ~3);
+	long offset = insn & 0xff;
+	int rt = (insn >> 8) & 0x7;
+	regs->uregs[rt] = base + offset * 4;
+}
+
+static void __kprobes
+t16_simulate_add_sp_imm(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	long imm = insn & 0x7f;
+	if (insn & 0x80) /* SUB */
+		regs->ARM_sp -= imm * 4;
+	else /* ADD */
+		regs->ARM_sp += imm * 4;
+}
+
+static void __kprobes
+t16_simulate_cbz(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	int rn = insn & 0x7;
+	kprobe_opcode_t nonzero = regs->uregs[rn] ? insn : ~insn;
+	if (nonzero & 0x800) {
+		long i = insn & 0x200;
+		long imm5 = insn & 0xf8;
+		unsigned long pc = thumb_probe_pc(p);
+		regs->ARM_pc = pc + (i >> 3) + (imm5 >> 2);
+	}
+}
+
+static void __kprobes
+t16_simulate_it(struct kprobe *p, struct pt_regs *regs)
+{
+	/*
+	 * The 8 IT state bits are split into two parts in CPSR:
+	 *	ITSTATE<1:0> are in CPSR<26:25>
+	 *	ITSTATE<7:2> are in CPSR<15:10>
+	 * The new IT state is in the lower byte of insn.
+	 */
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long cpsr = regs->ARM_cpsr;
+	cpsr &= ~PSR_IT_MASK;
+	cpsr |= (insn & 0xfc) << 8;
+	cpsr |= (insn & 0x03) << 25;
+	regs->ARM_cpsr = cpsr;
+}
+
+static void __kprobes
+t16_singlestep_it(struct kprobe *p, struct pt_regs *regs)
+{
+	regs->ARM_pc += 2;
+	t16_simulate_it(p, regs);
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_it(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	asi->insn_singlestep = t16_singlestep_it;
+	return INSN_GOOD_NO_SLOT;
+}
+
+static void __kprobes
+t16_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = thumb_probe_pc(p);
+	long offset = insn & 0x7f;
+	offset -= insn & 0x80; /* Apply sign bit */
+	regs->ARM_pc = pc + (offset * 2);
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	int cc = (insn >> 8) & 0xf;
+	asi->insn_check_cc = kprobe_condition_checks[cc];
+	asi->insn_handler = t16_simulate_cond_branch;
+	return INSN_GOOD_NO_SLOT;
+}
+
+static void __kprobes
+t16_simulate_branch(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = thumb_probe_pc(p);
+	long offset = insn & 0x3ff;
+	offset -= insn & 0x400; /* Apply sign bit */
+	regs->ARM_pc = pc + (offset * 2);
+}
+
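+/*
+ * Run the probed instruction from its slot with r0-r7 loaded from the
+ * saved pt_regs, write the registers back afterwards, and return the
+ * original CPSR with its APSR flags replaced by those the instruction
+ * produced.
+ */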
+static unsigned long __kprobes
+t16_emulate_loregs(struct kprobe *p, struct pt_regs *regs)
+{
+	unsigned long oldcpsr = regs->ARM_cpsr;
+	unsigned long newcpsr;
+
+	__asm__ __volatile__ (
+		"msr	cpsr_fs, %[oldcpsr]	\n\t"
+		"ldmia	%[regs], {r0-r7}	\n\t"
+		"blx	%[fn]			\n\t"
+		"stmia	%[regs], {r0-r7}	\n\t"
+		"mrs	%[newcpsr], cpsr	\n\t"
+		: [newcpsr] "=r" (newcpsr)
+		: [oldcpsr] "r" (oldcpsr), [regs] "r" (regs),
+		  [fn] "r" (p->ainsn.insn_fn)
+		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+		  "lr", "memory", "cc"
+		);
+
+	return (oldcpsr & ~APSR_MASK) | (newcpsr & APSR_MASK);
+}
+
+static void __kprobes
+t16_emulate_loregs_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+	regs->ARM_cpsr = t16_emulate_loregs(p, regs);
+}
+
+static void __kprobes
+t16_emulate_loregs_noitrwflags(struct kprobe *p, struct pt_regs *regs)
+{
+	unsigned long cpsr = t16_emulate_loregs(p, regs);
+	if (!in_it_block(cpsr))
+		regs->ARM_cpsr = cpsr;
+}
+
+static void __kprobes
+t16_emulate_hiregs(struct kprobe *p, struct pt_regs *regs)
+{
+	kprobe_opcode_t insn = p->opcode;
+	unsigned long pc = thumb_probe_pc(p);
+	int rdn = (insn & 0x7) | ((insn & 0x80) >> 4);
+	int rm = (insn >> 3) & 0xf;
+
+	register unsigned long rdnv asm("r1");
+	register unsigned long rmv asm("r0");
+	unsigned long cpsr = regs->ARM_cpsr;
+
+	rdnv = (rdn == 15) ? pc : regs->uregs[rdn];
+	rmv = (rm == 15) ? pc : regs->uregs[rm];
+
+	__asm__ __volatile__ (
+		"msr	cpsr_fs, %[cpsr]	\n\t"
+		"blx    %[fn]			\n\t"
+		"mrs	%[cpsr], cpsr		\n\t"
+		: "=r" (rdnv), [cpsr] "=r" (cpsr)
+		: "0" (rdnv), "r" (rmv), "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+		: "lr", "memory", "cc"
+	);
+
+	if (rdn == 15)
+		rdnv &= ~1;
+
+	regs->uregs[rdn] = rdnv;
+	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	insn &= ~0x00ff;
+	insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
+	((u16 *)asi->insn)[0] = insn;
+	asi->insn_handler = t16_emulate_hiregs;
+	return INSN_GOOD;
+}
+
+static void __kprobes
+t16_emulate_push(struct kprobe *p, struct pt_regs *regs)
+{
+	__asm__ __volatile__ (
+		"ldr	r9, [%[regs], #13*4]	\n\t"
+		"ldr	r8, [%[regs], #14*4]	\n\t"
+		"ldmia	%[regs], {r0-r7}	\n\t"
+		"blx	%[fn]			\n\t"
+		"str	r9, [%[regs], #13*4]	\n\t"
+		:
+		: [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
+		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
+		  "lr", "memory", "cc"
+		);
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	/*
+	 * To simulate a PUSH we use a Thumb-2 "STMDB R9!, {registers}"
+	 * and call it with R9=SP and LR in the register list represented
+	 * by R8.
+	 */
+	((u16 *)asi->insn)[0] = 0xe929;		/* 1st half STMDB R9!,{} */
+	((u16 *)asi->insn)[1] = insn & 0x1ff;	/* 2nd half (register list) */
+	asi->insn_handler = t16_emulate_push;
+	return INSN_GOOD;
+}
+
+static void __kprobes
+t16_emulate_pop_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+	__asm__ __volatile__ (
+		"ldr	r9, [%[regs], #13*4]	\n\t"
+		"ldmia	%[regs], {r0-r7}	\n\t"
+		"blx	%[fn]			\n\t"
+		"stmia	%[regs], {r0-r7}	\n\t"
+		"str	r9, [%[regs], #13*4]	\n\t"
+		:
+		: [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
+		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
+		  "lr", "memory", "cc"
+		);
+}
+
+static void __kprobes
+t16_emulate_pop_pc(struct kprobe *p, struct pt_regs *regs)
+{
+	register unsigned long pc asm("r8");
+
+	__asm__ __volatile__ (
+		"ldr	r9, [%[regs], #13*4]	\n\t"
+		"ldmia	%[regs], {r0-r7}	\n\t"
+		"blx	%[fn]			\n\t"
+		"stmia	%[regs], {r0-r7}	\n\t"
+		"str	r9, [%[regs], #13*4]	\n\t"
+		: "=r" (pc)
+		: [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
+		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
+		  "lr", "memory", "cc"
+		);
+
+	bx_write_pc(pc, regs);
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	/*
+	 * To simulate a POP we use a Thumb-2 "LDMIA R9!, {registers}"
+	 * and call it with R9=SP and PC in the register list represented
+	 * by R8.
+	 */
+	((u16 *)asi->insn)[0] = 0xe8b9;		/* 1st half LDMIA R9!,{} */
+	((u16 *)asi->insn)[1] = insn & 0x1ff;	/* 2nd half (register list) */
+	asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
+					 : t16_emulate_pop_nopc;
+	return INSN_GOOD;
+}
+
+static const union decode_item t16_table_1011[] = {
+	/* Miscellaneous 16-bit instructions		    */
+
+	/* ADD (SP plus immediate)	1011 0000 0xxx xxxx */
+	/* SUB (SP minus immediate)	1011 0000 1xxx xxxx */
+	DECODE_SIMULATE	(0xff00, 0xb000, t16_simulate_add_sp_imm),
+
+	/* CBZ				1011 00x1 xxxx xxxx */
+	/* CBNZ				1011 10x1 xxxx xxxx */
+	DECODE_SIMULATE	(0xf500, 0xb100, t16_simulate_cbz),
+
+	/* SXTH				1011 0010 00xx xxxx */
+	/* SXTB				1011 0010 01xx xxxx */
+	/* UXTH				1011 0010 10xx xxxx */
+	/* UXTB				1011 0010 11xx xxxx */
+	/* REV				1011 1010 00xx xxxx */
+	/* REV16			1011 1010 01xx xxxx */
+	/* ???				1011 1010 10xx xxxx */
+	/* REVSH			1011 1010 11xx xxxx */
+	DECODE_REJECT	(0xffc0, 0xba80),
+	DECODE_EMULATE	(0xf500, 0xb000, t16_emulate_loregs_rwflags),
+
+	/* PUSH				1011 010x xxxx xxxx */
+	DECODE_CUSTOM	(0xfe00, 0xb400, t16_decode_push),
+	/* POP				1011 110x xxxx xxxx */
+	DECODE_CUSTOM	(0xfe00, 0xbc00, t16_decode_pop),
+
+	/*
+	 * If-Then, and hints
+	 *				1011 1111 xxxx xxxx
+	 */
+
+	/* YIELD			1011 1111 0001 0000 */
+	DECODE_OR	(0xffff, 0xbf10),
+	/* SEV				1011 1111 0100 0000 */
+	DECODE_EMULATE	(0xffff, 0xbf40, kprobe_emulate_none),
+	/* NOP				1011 1111 0000 0000 */
+	/* WFE				1011 1111 0010 0000 */
+	/* WFI				1011 1111 0011 0000 */
+	DECODE_SIMULATE	(0xffcf, 0xbf00, kprobe_simulate_nop),
+	/* Unassigned hints		1011 1111 xxxx 0000 */
+	DECODE_REJECT	(0xff0f, 0xbf00),
+	/* IT				1011 1111 xxxx xxxx */
+	DECODE_CUSTOM	(0xff00, 0xbf00, t16_decode_it),
+
+	/* SETEND			1011 0110 010x xxxx */
+	/* CPS				1011 0110 011x xxxx */
+	/* BKPT				1011 1110 xxxx xxxx */
+	/* And unallocated instructions...		    */
+	DECODE_END
+};
+
+const union decode_item kprobe_decode_thumb16_table[] = {
+
+	/*
+	 * Shift (immediate), add, subtract, move, and compare
+	 *				00xx xxxx xxxx xxxx
+	 */
+
+	/* CMP (immediate)		0010 1xxx xxxx xxxx */
+	DECODE_EMULATE	(0xf800, 0x2800, t16_emulate_loregs_rwflags),
+
+	/* ADD (register)		0001 100x xxxx xxxx */
+	/* SUB (register)		0001 101x xxxx xxxx */
+	/* LSL (immediate)		0000 0xxx xxxx xxxx */
+	/* LSR (immediate)		0000 1xxx xxxx xxxx */
+	/* ASR (immediate)		0001 0xxx xxxx xxxx */
+	/* ADD (immediate, Thumb)	0001 110x xxxx xxxx */
+	/* SUB (immediate, Thumb)	0001 111x xxxx xxxx */
+	/* MOV (immediate)		0010 0xxx xxxx xxxx */
+	/* ADD (immediate, Thumb)	0011 0xxx xxxx xxxx */
+	/* SUB (immediate, Thumb)	0011 1xxx xxxx xxxx */
+	DECODE_EMULATE	(0xc000, 0x0000, t16_emulate_loregs_noitrwflags),
+
+	/*
+	 * 16-bit Thumb data-processing instructions
+	 *				0100 00xx xxxx xxxx
+	 */
+
+	/* TST (register)		0100 0010 00xx xxxx */
+	DECODE_EMULATE	(0xffc0, 0x4200, t16_emulate_loregs_rwflags),
+	/* CMP (register)		0100 0010 10xx xxxx */
+	/* CMN (register)		0100 0010 11xx xxxx */
+	DECODE_EMULATE	(0xff80, 0x4280, t16_emulate_loregs_rwflags),
+	/* AND (register)		0100 0000 00xx xxxx */
+	/* EOR (register)		0100 0000 01xx xxxx */
+	/* LSL (register)		0100 0000 10xx xxxx */
+	/* LSR (register)		0100 0000 11xx xxxx */
+	/* ASR (register)		0100 0001 00xx xxxx */
+	/* ADC (register)		0100 0001 01xx xxxx */
+	/* SBC (register)		0100 0001 10xx xxxx */
+	/* ROR (register)		0100 0001 11xx xxxx */
+	/* RSB (immediate)		0100 0010 01xx xxxx */
+	/* ORR (register)		0100 0011 00xx xxxx */
+	/* MUL				0100 0011 00xx xxxx */
+	/* BIC (register)		0100 0011 10xx xxxx */
+	/* MVN (register)		0100 0011 10xx xxxx */
+	DECODE_EMULATE	(0xfc00, 0x4000, t16_emulate_loregs_noitrwflags),
+
+	/*
+	 * Special data instructions and branch and exchange
+	 *				0100 01xx xxxx xxxx
+	 */
+
+	/* BLX pc			0100 0111 1111 1xxx */
+	DECODE_REJECT	(0xfff8, 0x47f8),
+
+	/* BX (register)		0100 0111 0xxx xxxx */
+	/* BLX (register)		0100 0111 1xxx xxxx */
+	DECODE_SIMULATE (0xff00, 0x4700, t16_simulate_bxblx),
+
+	/* ADD pc, pc			0100 0100 1111 1111 */
+	DECODE_REJECT	(0xffff, 0x44ff),
+
+	/* ADD (register)		0100 0100 xxxx xxxx */
+	/* CMP (register)		0100 0101 xxxx xxxx */
+	/* MOV (register)		0100 0110 xxxx xxxx */
+	DECODE_CUSTOM	(0xfc00, 0x4400, t16_decode_hiregs),
+
+	/*
+	 * Load from Literal Pool
+	 * LDR (literal)		0100 1xxx xxxx xxxx
+	 */
+	DECODE_SIMULATE	(0xf800, 0x4800, t16_simulate_ldr_literal),
+
+	/*
+	 * 16-bit Thumb Load/store instructions
+	 *				0101 xxxx xxxx xxxx
+	 *				011x xxxx xxxx xxxx
+	 *				100x xxxx xxxx xxxx
+	 */
+
+	/* STR (register)		0101 000x xxxx xxxx */
+	/* STRH (register)		0101 001x xxxx xxxx */
+	/* STRB (register)		0101 010x xxxx xxxx */
+	/* LDRSB (register)		0101 011x xxxx xxxx */
+	/* LDR (register)		0101 100x xxxx xxxx */
+	/* LDRH (register)		0101 101x xxxx xxxx */
+	/* LDRB (register)		0101 110x xxxx xxxx */
+	/* LDRSH (register)		0101 111x xxxx xxxx */
+	/* STR (immediate, Thumb)	0110 0xxx xxxx xxxx */
+	/* LDR (immediate, Thumb)	0110 1xxx xxxx xxxx */
+	/* STRB (immediate, Thumb)	0111 0xxx xxxx xxxx */
+	/* LDRB (immediate, Thumb)	0111 1xxx xxxx xxxx */
+	DECODE_EMULATE	(0xc000, 0x4000, t16_emulate_loregs_rwflags),
+	/* STRH (immediate, Thumb)	1000 0xxx xxxx xxxx */
+	/* LDRH (immediate, Thumb)	1000 1xxx xxxx xxxx */
+	DECODE_EMULATE	(0xf000, 0x8000, t16_emulate_loregs_rwflags),
+	/* STR (immediate, Thumb)	1001 0xxx xxxx xxxx */
+	/* LDR (immediate, Thumb)	1001 1xxx xxxx xxxx */
+	DECODE_SIMULATE	(0xf000, 0x9000, t16_simulate_ldrstr_sp_relative),
+
+	/*
+	 * Generate PC-/SP-relative address
+	 * ADR (literal)		1010 0xxx xxxx xxxx
+	 * ADD (SP plus immediate)	1010 1xxx xxxx xxxx
+	 */
+	DECODE_SIMULATE	(0xf000, 0xa000, t16_simulate_reladr),
+
+	/*
+	 * Miscellaneous 16-bit instructions
+	 *				1011 xxxx xxxx xxxx
+	 */
+	DECODE_TABLE	(0xf000, 0xb000, t16_table_1011),
+
+	/* STM				1100 0xxx xxxx xxxx */
+	/* LDM				1100 1xxx xxxx xxxx */
+	DECODE_EMULATE	(0xf000, 0xc000, t16_emulate_loregs_rwflags),
+
+	/*
+	 * Conditional branch, and Supervisor Call
+	 */
+
+	/* Permanently UNDEFINED	1101 1110 xxxx xxxx */
+	/* SVC				1101 1111 xxxx xxxx */
+	DECODE_REJECT	(0xfe00, 0xde00),
+
+	/* Conditional branch		1101 xxxx xxxx xxxx */
+	DECODE_CUSTOM	(0xf000, 0xd000, t16_decode_cond_branch),
+
+	/*
+	 * Unconditional branch
+	 * B				1110 0xxx xxxx xxxx
+	 */
+	DECODE_SIMULATE	(0xf800, 0xe000, t16_simulate_branch),
+
+	DECODE_END
+};
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_thumb16_table);
+#endif
+
+static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
+{
+	if (unlikely(in_it_block(cpsr)))
+		return kprobe_condition_checks[current_cond(cpsr)](cpsr);
+	return true;
+}
+
+static void __kprobes thumb16_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	regs->ARM_pc += 2;
+	p->ainsn.insn_handler(p, regs);
+	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+}
+
+static void __kprobes thumb32_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	regs->ARM_pc += 4;
+	p->ainsn.insn_handler(p, regs);
+	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+}
+
+enum kprobe_insn __kprobes
+thumb16_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	asi->insn_singlestep = thumb16_singlestep;
+	asi->insn_check_cc = thumb_check_cc;
+	return kprobe_decode_insn(insn, asi, kprobe_decode_thumb16_table, true);
+}
+
+enum kprobe_insn __kprobes
+thumb32_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+	asi->insn_singlestep = thumb32_singlestep;
+	asi->insn_check_cc = thumb_check_cc;
+	return kprobe_decode_insn(insn, asi, kprobe_decode_thumb32_table, true);
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes.c
new file mode 100644
index 0000000..4dd41fc
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes.c
@@ -0,0 +1,618 @@
+/*
+ * arch/arm/kernel/kprobes.c
+ *
+ * Kprobes on ARM
+ *
+ * Abhishek Sagar <sagar.abhishek@gmail.com>
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * Nicolas Pitre <nico@marvell.com>
+ * Copyright (C) 2007 Marvell Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/stringify.h>
+#include <asm/traps.h>
+#include <asm/cacheflush.h>
+
+#include "kprobes.h"
+#include "patch.h"
+
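+/*
+ * Number of stack bytes that may safely be saved around a jprobe:
+ * whatever lies between 'addr' and the top of the current kernel
+ * stack, capped at MAX_STACK_SIZE.
+ */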
+#define MIN_STACK_SIZE(addr) 				\
+	min((unsigned long)MAX_STACK_SIZE,		\
+	    (unsigned long)current_thread_info() + THREAD_START_SP - (addr))
+
+#define flush_insns(addr, size)				\
+	flush_icache_range((unsigned long)(addr),	\
+			   (unsigned long)(addr) +	\
+			   (size))
+
+/* Used as a marker in ARM_pc to note when we're in a jprobe. */
+#define JPROBE_MAGIC_ADDR		0xffffffff
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	kprobe_opcode_t insn;
+	kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
+	unsigned long addr = (unsigned long)p->addr;
+	bool thumb;
+	kprobe_decode_insn_t *decode_insn;
+	int is;
+
+	if (in_exception_text(addr))
+		return -EINVAL;
+
+#ifdef CONFIG_THUMB2_KERNEL
+	thumb = true;
+	addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
+	insn = ((u16 *)addr)[0];
+	if (is_wide_instruction(insn)) {
+		insn <<= 16;
+		insn |= ((u16 *)addr)[1];
+		decode_insn = thumb32_kprobe_decode_insn;
+	} else
+		decode_insn = thumb16_kprobe_decode_insn;
+#else /* !CONFIG_THUMB2_KERNEL */
+	thumb = false;
+	if (addr & 0x3)
+		return -EINVAL;
+	insn = *p->addr;
+	decode_insn = arm_kprobe_decode_insn;
+#endif
+
+	p->opcode = insn;
+	p->ainsn.insn = tmp_insn;
+
+	switch ((*decode_insn)(insn, &p->ainsn)) {
+	case INSN_REJECTED:	/* not supported */
+		return -EINVAL;
+
+	case INSN_GOOD:		/* instruction uses slot */
+		p->ainsn.insn = get_insn_slot();
+		if (!p->ainsn.insn)
+			return -ENOMEM;
+		for (is = 0; is < MAX_INSN_SIZE; ++is)
+			p->ainsn.insn[is] = tmp_insn[is];
+		flush_insns(p->ainsn.insn,
+				sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
+		p->ainsn.insn_fn = (kprobe_insn_fn_t *)
+					((uintptr_t)p->ainsn.insn | thumb);
+		break;
+
+	case INSN_GOOD_NO_SLOT:	/* instruction doesn't need insn slot */
+		p->ainsn.insn = NULL;
+		break;
+	}
+
+	return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	unsigned int brkp;
+	void *addr;
+
+	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+		/* Remove any Thumb flag */
+		addr = (void *)((uintptr_t)p->addr & ~1);
+
+		if (is_wide_instruction(p->opcode))
+			brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
+		else
+			brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
+	} else {
+		kprobe_opcode_t insn = p->opcode;
+
+		addr = p->addr;
+		brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
+
+		if (insn >= 0xe0000000)
+			brkp |= 0xe0000000;  /* Unconditional instruction */
+		else
+			brkp |= insn & 0xf0000000;  /* Copy condition from insn */
+	}
+
+	patch_text(addr, brkp);
+}
+
+/*
+ * The actual disarming is done here on each CPU and synchronized using
+ * stop_machine. This synchronization is necessary on SMP to avoid removing
+ * a probe between the moment the 'Undefined Instruction' exception is raised
+ * and the moment the exception handler reads the faulting instruction from
+ * memory. It is also needed to atomically set the two half-words of a 32-bit
+ * Thumb breakpoint.
+ */
+int __kprobes __arch_disarm_kprobe(void *p)
+{
+	struct kprobe *kp = p;
+	void *addr = (void *)((uintptr_t)kp->addr & ~1);
+
+	__patch_text(addr, kp->opcode);
+
+	return 0;
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn, 0);
+		p->ainsn.insn = NULL;
+	}
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+	__get_cpu_var(current_kprobe) = p;
+}
+
+static void __kprobes
+singlestep_skip(struct kprobe *p, struct pt_regs *regs)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+	if (is_wide_instruction(p->opcode))
+		regs->ARM_pc += 4;
+	else
+		regs->ARM_pc += 2;
+#else
+	regs->ARM_pc += 4;
+#endif
+}
+
+static inline void __kprobes
+singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+{
+	p->ainsn.insn_singlestep(p, regs);
+}
+
+/*
+ * Called with IRQs disabled. IRQs must remain disabled from that point
+ * all the way until processing this kprobe is complete.  The current
+ * kprobes implementation cannot process more than one nested level of
+ * kprobe, and that level is reserved for user kprobe handlers, so we can't
+ * risk encountering a new kprobe in an interrupt handler.
+ */
+void __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p, *cur;
+	struct kprobe_ctlblk *kcb;
+
+	kcb = get_kprobe_ctlblk();
+	cur = kprobe_running();
+
+#ifdef CONFIG_THUMB2_KERNEL
+	/*
+	 * First look for a probe which was registered using an address with
+	 * bit 0 set; this is the usual situation for pointers to Thumb code.
+	 * If not found, fall back to looking for one with bit 0 clear.
+	 */
+	p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
+	if (!p)
+		p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
+
+#else /* ! CONFIG_THUMB2_KERNEL */
+	p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
+#endif
+
+	if (p) {
+		if (cur) {
+			/* Kprobe is pending, so we're recursing. */
+			switch (kcb->kprobe_status) {
+			case KPROBE_HIT_ACTIVE:
+			case KPROBE_HIT_SSDONE:
+				/* A pre- or post-handler probe got us here. */
+				kprobes_inc_nmissed_count(p);
+				save_previous_kprobe(kcb);
+				set_current_kprobe(p);
+				kcb->kprobe_status = KPROBE_REENTER;
+				singlestep(p, regs, kcb);
+				restore_previous_kprobe(kcb);
+				break;
+			default:
+				/* impossible cases */
+				BUG();
+			}
+		} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+			/* Probe hit and conditional execution check ok. */
+			set_current_kprobe(p);
+			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+			/*
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with normal processing.  If we have a
+			 * pre-handler and it returned non-zero, it prepped
+			 * for calling the break_handler below on re-entry,
+			 * so get out doing nothing more here.
+			 */
+			if (!p->pre_handler || !p->pre_handler(p, regs)) {
+				kcb->kprobe_status = KPROBE_HIT_SS;
+				singlestep(p, regs, kcb);
+				if (p->post_handler) {
+					kcb->kprobe_status = KPROBE_HIT_SSDONE;
+					p->post_handler(p, regs, 0);
+				}
+				reset_current_kprobe();
+			}
+		} else {
+			/*
+			 * Probe hit but conditional execution check failed,
+			 * so just skip the instruction and continue as if
+			 * nothing had happened.
+			 */
+			singlestep_skip(p, regs);
+		}
+	} else if (cur) {
+		/* We probably hit a jprobe.  Call its break handler. */
+		if (cur->break_handler && cur->break_handler(cur, regs)) {
+			kcb->kprobe_status = KPROBE_HIT_SS;
+			singlestep(cur, regs, kcb);
+			if (cur->post_handler) {
+				kcb->kprobe_status = KPROBE_HIT_SSDONE;
+				cur->post_handler(cur, regs, 0);
+			}
+		}
+		reset_current_kprobe();
+	} else {
+		/*
+		 * The probe was removed and a race is in progress.
+		 * There is nothing we can do about it.  Let's restart
+		 * the instruction.  By the time we can restart, the
+		 * real instruction will be there.
+		 */
+	}
+}
+
+static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	kprobe_handler(regs);
+	local_irq_restore(flags);
+	return 0;
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		/*
+		 * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe and the PC to point back to the probe address
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+		 */
+		regs->ARM_pc = (long)cur->addr;
+		if (kcb->kprobe_status == KPROBE_REENTER) {
+			restore_previous_kprobe(kcb);
+		} else {
+			reset_current_kprobe();
+		}
+		break;
+
+	case KPROBE_HIT_ACTIVE:
+	case KPROBE_HIT_SSDONE:
+		/*
+		 * We increment the nmissed count for accounting;
+		 * the npre/npostfault counts could also be used to
+		 * account for these specific fault cases.
+		 */
+		kprobes_inc_nmissed_count(cur);
+
+		/*
+		 * We come here because instructions in the pre/post
+		 * handler caused a page fault; this can happen when the
+		 * handler accesses user space via copy_from_user(),
+		 * get_user(), etc. Let the user-specified handler try
+		 * to fix it.
+		 */
+		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
+			return 1;
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
+{
+	/*
+	 * notify_die() is currently never called on ARM,
+	 * so this callback is currently empty.
+	 */
+	return NOTIFY_DONE;
+}
+
+/*
+ * When a retprobed function returns, trampoline_handler() is called,
+ * calling the kretprobe's handler. We construct a struct pt_regs to
+ * give a view of registers r0-r11 to the user return-handler.  This is
+ * not a complete pt_regs structure, but it should be sufficient for
+ * kretprobe handlers, which are normally interested in r0 only anyway.
+ */
+void __naked __kprobes kretprobe_trampoline(void)
+{
+	__asm__ __volatile__ (
+		"stmdb	sp!, {r0 - r11}		\n\t"
+		"mov	r0, sp			\n\t"
+		"bl	trampoline_handler	\n\t"
+		"mov	lr, r0			\n\t"
+		"ldmia	sp!, {r0 - r11}		\n\t"
+#ifdef CONFIG_THUMB2_KERNEL
+		"bx	lr			\n\t"
+#else
+		"mov	pc, lr			\n\t"
+#endif
+		: : : "memory");
+}
+
+/* Called from kretprobe_trampoline */
+static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head, empty_rp;
+	struct hlist_node *node, *tmp;
+	unsigned long flags, orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+	INIT_HLIST_HEAD(&empty_rp);
+	kretprobe_hash_lock(current, &head, &flags);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path have
+	 * a return probe installed on them, and/or more than one return
+	 * probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler) {
+			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->rp->handler(ri, regs);
+			__get_cpu_var(current_kprobe) = NULL;
+		}
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri, &empty_rp);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+	kretprobe_hash_unlock(current, &flags);
+
+	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+		hlist_del(&ri->hlist);
+		kfree(ri);
+	}
+
+	return (void *)orig_ret_address;
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+				      struct pt_regs *regs)
+{
+	ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;
+
+	/* Replace the return addr with trampoline addr. */
+	regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	long sp_addr = regs->ARM_sp;
+	long cpsr;
+
+	kcb->jprobe_saved_regs = *regs;
+	memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
+	regs->ARM_pc = (long)jp->entry;
+
+	cpsr = regs->ARM_cpsr | PSR_I_BIT;
+#ifdef CONFIG_THUMB2_KERNEL
+	/* Set correct Thumb state in cpsr */
+	if (regs->ARM_pc & 1)
+		cpsr |= PSR_T_BIT;
+	else
+		cpsr &= ~PSR_T_BIT;
+#endif
+	regs->ARM_cpsr = cpsr;
+
+	preempt_disable();
+	return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	__asm__ __volatile__ (
+		/*
+		 * Set up an empty pt_regs. Fill SP and PC fields as
+		 * they're needed by longjmp_break_handler.
+		 *
+		 * We allocate some slack between the original SP and start of
+		 * our fabricated regs. To be precise, we want the worst case
+		 * covered, which is STMFD with all 16 regs, so we allocate
+		 * 2 * sizeof(struct pt_regs).
+		 *
+		 * This is to prevent any simulated instruction from writing
+		 * over the regs when they are accessing the stack.
+		 */
+#ifdef CONFIG_THUMB2_KERNEL
+		"sub    r0, %0, %1		\n\t"
+		"mov    sp, r0			\n\t"
+#else
+		"sub    sp, %0, %1		\n\t"
+#endif
+		"ldr    r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
+		"str    %0, [sp, %2]		\n\t"
+		"str    r0, [sp, %3]		\n\t"
+		"mov    r0, sp			\n\t"
+		"bl     kprobe_handler		\n\t"
+
+		/*
+		 * Return to the context saved by setjmp_pre_handler
+		 * and restored by longjmp_break_handler.
+		 */
+#ifdef CONFIG_THUMB2_KERNEL
+		"ldr	lr, [sp, %2]		\n\t" /* lr = saved sp */
+		"ldrd	r0, r1, [sp, %5]	\n\t" /* r0,r1 = saved lr,pc */
+		"ldr	r2, [sp, %4]		\n\t" /* r2 = saved psr */
+		"stmdb	lr!, {r0, r1, r2}	\n\t" /* push saved lr and */
+						      /* rfe context */
+		"ldmia	sp, {r0 - r12}		\n\t"
+		"mov	sp, lr			\n\t"
+		"ldr	lr, [sp], #4		\n\t"
+		"rfeia	sp!			\n\t"
+#else
+		"ldr	r0, [sp, %4]		\n\t"
+		"msr	cpsr_cxsf, r0		\n\t"
+		"ldmia	sp, {r0 - pc}		\n\t"
+#endif
+		:
+		: "r" (kcb->jprobe_saved_regs.ARM_sp),
+		  "I" (sizeof(struct pt_regs) * 2),
+		  "J" (offsetof(struct pt_regs, ARM_sp)),
+		  "J" (offsetof(struct pt_regs, ARM_pc)),
+		  "J" (offsetof(struct pt_regs, ARM_cpsr)),
+		  "J" (offsetof(struct pt_regs, ARM_lr))
+		: "memory", "cc");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	long stack_addr = kcb->jprobe_saved_regs.ARM_sp;
+	long orig_sp = regs->ARM_sp;
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+
+	if (regs->ARM_pc == JPROBE_MAGIC_ADDR) {
+		if (orig_sp != stack_addr) {
+			struct pt_regs *saved_regs =
+				(struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp;
+			printk("current sp %lx does not match saved sp %lx\n",
+			       orig_sp, stack_addr);
+			printk("Saved registers for jprobe %p\n", jp);
+			show_regs(saved_regs);
+			printk("Current registers\n");
+			show_regs(regs);
+			BUG();
+		}
+		*regs = kcb->jprobe_saved_regs;
+		memcpy((void *)stack_addr, kcb->jprobes_stack,
+		       MIN_STACK_SIZE(stack_addr));
+		preempt_enable_no_resched();
+		return 1;
+	}
+	return 0;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+	return 0;
+}
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+static struct undef_hook kprobes_thumb16_break_hook = {
+	.instr_mask	= 0xffff,
+	.instr_val	= KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
+	.cpsr_mask	= MODE_MASK,
+	.cpsr_val	= SVC_MODE,
+	.fn		= kprobe_trap_handler,
+};
+
+static struct undef_hook kprobes_thumb32_break_hook = {
+	.instr_mask	= 0xffffffff,
+	.instr_val	= KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
+	.cpsr_mask	= MODE_MASK,
+	.cpsr_val	= SVC_MODE,
+	.fn		= kprobe_trap_handler,
+};
+
+#else  /* !CONFIG_THUMB2_KERNEL */
+
+static struct undef_hook kprobes_arm_break_hook = {
+	.instr_mask	= 0x0fffffff,
+	.instr_val	= KPROBE_ARM_BREAKPOINT_INSTRUCTION,
+	.cpsr_mask	= MODE_MASK,
+	.cpsr_val	= SVC_MODE,
+	.fn		= kprobe_trap_handler,
+};
+
+#endif /* !CONFIG_THUMB2_KERNEL */
+
+int __init arch_init_kprobes(void)
+{
+	arm_kprobe_decode_init();
+#ifdef CONFIG_THUMB2_KERNEL
+	register_undef_hook(&kprobes_thumb16_break_hook);
+	register_undef_hook(&kprobes_thumb32_break_hook);
+#else
+	register_undef_hook(&kprobes_arm_break_hook);
+#endif
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes.h b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes.h
new file mode 100644
index 0000000..38945f7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/kprobes.h
@@ -0,0 +1,428 @@
+/*
+ * arch/arm/kernel/kprobes.h
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * Some contents moved here from arch/arm/include/asm/kprobes.h which is
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KERNEL_KPROBES_H
+#define _ARM_KERNEL_KPROBES_H
+
+/*
+ * These undefined instructions must be unique and
+ * reserved solely for kprobes' use.
+ */
+#define KPROBE_ARM_BREAKPOINT_INSTRUCTION	0x07f001f8
+#define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION	0xde18
+#define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION	0xf7f0a018
+
+
+enum kprobe_insn {
+	INSN_REJECTED,
+	INSN_GOOD,
+	INSN_GOOD_NO_SLOT
+};
+
+typedef enum kprobe_insn (kprobe_decode_insn_t)(kprobe_opcode_t,
+						struct arch_specific_insn *);
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+enum kprobe_insn thumb16_kprobe_decode_insn(kprobe_opcode_t,
+						struct arch_specific_insn *);
+enum kprobe_insn thumb32_kprobe_decode_insn(kprobe_opcode_t,
+						struct arch_specific_insn *);
+
+#else /* !CONFIG_THUMB2_KERNEL */
+
+enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t,
+					struct arch_specific_insn *);
+#endif
+
+void __init arm_kprobe_decode_init(void);
+
+extern kprobe_check_cc * const kprobe_condition_checks[16];
+
+
+#if __LINUX_ARM_ARCH__ >= 7
+
+/* str_pc_offset is architecturally defined from ARMv7 onwards */
+#define str_pc_offset 8
+#define find_str_pc_offset()
+
+#else /* __LINUX_ARM_ARCH__ < 7 */
+
+/* We need a run-time check to determine str_pc_offset */
+extern int str_pc_offset;
+void __init find_str_pc_offset(void);
+
+#endif
+
+
+/*
+ * Update ITSTATE after normal execution of an IT block instruction.
+ *
+ * The 8 IT state bits are split into two parts in CPSR:
+ *	ITSTATE<1:0> are in CPSR<26:25>
+ *	ITSTATE<7:2> are in CPSR<15:10>
+ */
+static inline unsigned long it_advance(unsigned long cpsr)
+{
+	if ((cpsr & 0x06000400) == 0) {
+		/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
+		cpsr &= ~PSR_IT_MASK;
+	} else {
+		/* We need to shift left ITSTATE<4:0> */
+		const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */
+		unsigned long it = cpsr & mask;
+		it <<= 1;
+		it |= it >> (27 - 10);  /* Carry ITSTATE<2> to correct place */
+		it &= mask;
+		cpsr &= ~mask;
+		cpsr |= it;
+	}
+	return cpsr;
+}
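+
+/*
+ * Usage sketch (illustrative): the Thumb single-step handlers in this
+ * patch advance the IT state after each probed instruction, e.g.
+ *
+ *	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+ *
+ * so any remaining instructions of an IT block keep their intended
+ * condition.
+ */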
+
+static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
+{
+	long cpsr = regs->ARM_cpsr;
+	if (pcv & 0x1) {
+		cpsr |= PSR_T_BIT;
+		pcv &= ~0x1;
+	} else {
+		cpsr &= ~PSR_T_BIT;
+		pcv &= ~0x2;	/* Avoid UNPREDICTABLE address allignment */
+	}
+	regs->ARM_cpsr = cpsr;
+	regs->ARM_pc = pcv;
+}
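+
+/*
+ * Worked example (illustrative): bx_write_pc(0x8001, regs) sets the
+ * Thumb bit and branches to 0x8000; bx_write_pc(0x8004, regs) clears
+ * the Thumb bit and branches to 0x8004.
+ */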
+
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+/* Kernels built for >= ARMv6 should never run on <= ARMv5 hardware, so... */
+#define load_write_pc_interworks true
+#define test_load_write_pc_interworking()
+
+#else /* __LINUX_ARM_ARCH__ < 6 */
+
+/* We need run-time testing to determine if load_write_pc() should interwork. */
+extern bool load_write_pc_interworks;
+void __init test_load_write_pc_interworking(void);
+
+#endif
+
+static inline void __kprobes load_write_pc(long pcv, struct pt_regs *regs)
+{
+	if (load_write_pc_interworks)
+		bx_write_pc(pcv, regs);
+	else
+		regs->ARM_pc = pcv;
+}
+
+
+#if __LINUX_ARM_ARCH__ >= 7
+
+#define alu_write_pc_interworks true
+#define test_alu_write_pc_interworking()
+
+#elif __LINUX_ARM_ARCH__ <= 5
+
+/* Kernels built for <= ARMv5 should never run on >= ARMv6 hardware, so... */
+#define alu_write_pc_interworks false
+#define test_alu_write_pc_interworking()
+
+#else /* __LINUX_ARM_ARCH__ == 6 */
+
+/* We could be an ARMv6 binary on ARMv7 hardware so we need a run-time check. */
+extern bool alu_write_pc_interworks;
+void __init test_alu_write_pc_interworking(void);
+
+#endif /* __LINUX_ARM_ARCH__ == 6 */
+
+static inline void __kprobes alu_write_pc(long pcv, struct pt_regs *regs)
+{
+	if (alu_write_pc_interworks)
+		bx_write_pc(pcv, regs);
+	else
+		regs->ARM_pc = pcv;
+}
+
+
+void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs);
+void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs);
+
+enum kprobe_insn __kprobes
+kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi);
+
+/*
+ * Test if load/store instructions writeback the address register.
+ * if P (bit 24) == 0 or W (bit 21) == 1
+ */
+#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)
+
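+/*
+ * Example (illustrative): offset addressing with P (bit 24) set and
+ * W (bit 21) clear makes is_writeback() evaluate to 0; any other P/W
+ * combination is non-zero, i.e. the address register is written back.
+ */
+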
+/*
+ * The following definitions and macros are used to build instruction
+ * decoding tables for use by kprobe_decode_insn.
+ *
+ * These tables are a concatenation of entries, each of which consists of one of
+ * the decode_* structs. All of the fields in every type of decode structure
+ * are of the union type decode_item, therefore the entire decode table can be
+ * viewed as an array of these and declared like:
+ *
+ *	static const union decode_item table_name[] = {};
+ *
+ * In order to construct each entry in the table, macros are used to
+ * initialise a number of sequential decode_item values in a layout which
+ * matches the relevant struct. E.g. DECODE_SIMULATE initialises a struct
+ * decode_simulate by initialising four decode_item objects like this...
+ *
+ *	{.bits = _type},
+ *	{.bits = _mask},
+ *	{.bits = _value},
+ *	{.handler = _handler},
+ *
+ * Initialising a specified member of the union means that the compiler
+ * will produce a warning if the argument is of an incorrect type.
+ *
+ * Below is a list of each of the macros used to initialise entries and a
+ * description of the action performed when that entry is matched to an
+ * instruction. A match is found when (instruction & mask) == value.
+ *
+ * DECODE_TABLE(mask, value, table)
+ *	Instruction decoding jumps to parsing the new sub-table 'table'.
+ *
+ * DECODE_CUSTOM(mask, value, decoder)
+ *	The custom function 'decoder' is called to complete the decoding
+ *	of an instruction.
+ *
+ * DECODE_SIMULATE(mask, value, handler)
+ *	Set the probes instruction handler to 'handler', this will be used
+ *	to simulate the instruction when the probe is hit. Decoding returns
+ *	with INSN_GOOD_NO_SLOT.
+ *
+ * DECODE_EMULATE(mask, value, handler)
+ *	Set the probes instruction handler to 'handler', this will be used
+ *	to emulate the instruction when the probe is hit. The modified
+ *	instruction (see below) is placed in the probes instruction slot so it
+ *	may be called by the emulation code. Decoding returns with INSN_GOOD.
+ *
+ * DECODE_REJECT(mask, value)
+ *	Instruction decoding fails with INSN_REJECTED.
+ *
+ * DECODE_OR(mask, value)
+ *	This allows the mask/value test of multiple table entries to be
+ *	logically ORed. Once an 'or' entry is matched the decoding action to
+ *	be performed is that of the next entry which isn't an 'or'. E.g.
+ *
+ *		DECODE_OR	(mask1, value1)
+ *		DECODE_OR	(mask2, value2)
+ *		DECODE_SIMULATE	(mask3, value3, simulation_handler)
+ *
+ *	This means that if any of the three mask/value pairs match the
+ *	instruction being decoded, then 'simulation_handler' will be used
+ *	for it.
+ *
+ * Both the SIMULATE and EMULATE macros have a second form which take an
+ * additional 'regs' argument.
+ *
+ *	DECODE_SIMULATEX(mask, value, handler, regs)
+ *	DECODE_EMULATEX	(mask, value, handler, regs)
+ *
+ * These are used to specify what kind of CPU register is encoded in each of the
+ * least significant 5 nibbles of the instruction being decoded. The regs value
+ * is specified using the REGS macro, this takes any of the REG_TYPE_* values
+ * from enum decode_reg_type as arguments; only the '*' part of the name is
+ * given. E.g.
+ *
+ *	REGS(0, ANY, NOPC, 0, ANY)
+ *
+ * This indicates an instruction is encoded like:
+ *
+ *	bits 19..16	ignore
+ *	bits 15..12	any register allowed here
+ *	bits 11.. 8	any register except PC allowed here
+ *	bits  7.. 4	ignore
+ *	bits  3.. 0	any register allowed here
+ *
+ * This register specification is checked after a decode table entry is found to
+ * match an instruction (through the mask/value test). Any invalid register then
+ * found in the instruction will cause decoding to fail with INSN_REJECTED. In
+ * the above example this would happen if bits 11..8 of the instruction were
+ * 1111, indicating R15 or PC.
+ *
+ * As well as checking for legal combinations of registers, this data is also
+ * used to modify the registers encoded in the instructions so that
+ * emulation routines can use it. (See decode_regs() and INSN_NEW_BITS.)
+ *
+ * Here is a real example which matches ARM instructions of the form
+ * "AND <Rd>,<Rn>,<Rm>,<shift> <Rs>"
+ *
+ *	DECODE_EMULATEX	(0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
+ *						 REGS(ANY, ANY, NOPC, 0, ANY)),
+ *						      ^    ^    ^        ^
+ *						      Rn   Rd   Rs       Rm
+ *
+ * Decoding the instruction "AND R4, R5, R6, ASL R15" will be rejected because
+ * Rs == R15
+ *
+ * Decoding the instruction "AND R4, R5, R6, ASL R7" will be accepted and the
+ * instruction will be modified to "AND R0, R2, R3, ASL R1" and then placed into
+ * the kprobes instruction slot. This can then be called later by the handler
+ * function emulate_rd12rn16rm0rs8_rwflags in order to simulate the instruction.
+ */
+
+enum decode_type {
+	DECODE_TYPE_END,
+	DECODE_TYPE_TABLE,
+	DECODE_TYPE_CUSTOM,
+	DECODE_TYPE_SIMULATE,
+	DECODE_TYPE_EMULATE,
+	DECODE_TYPE_OR,
+	DECODE_TYPE_REJECT,
+	NUM_DECODE_TYPES /* Must be last enum */
+};
+
+#define DECODE_TYPE_BITS	4
+#define DECODE_TYPE_MASK	((1 << DECODE_TYPE_BITS) - 1)
+
+enum decode_reg_type {
+	REG_TYPE_NONE = 0, /* Not a register, ignore */
+	REG_TYPE_ANY,	   /* Any register allowed */
+	REG_TYPE_SAMEAS16, /* Register should be same as that at bits 19..16 */
+	REG_TYPE_SP,	   /* Register must be SP */
+	REG_TYPE_PC,	   /* Register must be PC */
+	REG_TYPE_NOSP,	   /* Register must not be SP */
+	REG_TYPE_NOSPPC,   /* Register must not be SP or PC */
+	REG_TYPE_NOPC,	   /* Register must not be PC */
+	REG_TYPE_NOPCWB,   /* No PC if load/store write-back flag also set */
+
+	/* The following types are used when the encoding for PC indicates
+	 * another instruction form. This distinction only matters for test
+	 * case coverage checks.
+	 */
+	REG_TYPE_NOPCX,	   /* Register must not be PC */
+	REG_TYPE_NOSPPCX,  /* Register must not be SP or PC */
+
+	/* Alias to allow '0' arg to be used in REGS macro. */
+	REG_TYPE_0 = REG_TYPE_NONE
+};
+
+#define REGS(r16, r12, r8, r4, r0)	\
+	((REG_TYPE_##r16) << 16) +	\
+	((REG_TYPE_##r12) << 12) +	\
+	((REG_TYPE_##r8) << 8) +	\
+	((REG_TYPE_##r4) << 4) +	\
+	(REG_TYPE_##r0)
+
+union decode_item {
+	u32			bits;
+	const union decode_item	*table;
+	kprobe_insn_handler_t	*handler;
+	kprobe_decode_insn_t	*decoder;
+};
+
+
+#define DECODE_END			\
+	{.bits = DECODE_TYPE_END}
+
+
+struct decode_header {
+	union decode_item	type_regs;
+	union decode_item	mask;
+	union decode_item	value;
+};
+
+#define DECODE_HEADER(_type, _mask, _value, _regs)		\
+	{.bits = (_type) | ((_regs) << DECODE_TYPE_BITS)},	\
+	{.bits = (_mask)},					\
+	{.bits = (_value)}
+
+
+struct decode_table {
+	struct decode_header	header;
+	union decode_item	table;
+};
+
+#define DECODE_TABLE(_mask, _value, _table)			\
+	DECODE_HEADER(DECODE_TYPE_TABLE, _mask, _value, 0),	\
+	{.table = (_table)}
+
+
+struct decode_custom {
+	struct decode_header	header;
+	union decode_item	decoder;
+};
+
+#define DECODE_CUSTOM(_mask, _value, _decoder)			\
+	DECODE_HEADER(DECODE_TYPE_CUSTOM, _mask, _value, 0),	\
+	{.decoder = (_decoder)}
+
+
+struct decode_simulate {
+	struct decode_header	header;
+	union decode_item	handler;
+};
+
+#define DECODE_SIMULATEX(_mask, _value, _handler, _regs)		\
+	DECODE_HEADER(DECODE_TYPE_SIMULATE, _mask, _value, _regs),	\
+	{.handler = (_handler)}
+
+#define DECODE_SIMULATE(_mask, _value, _handler)	\
+	DECODE_SIMULATEX(_mask, _value, _handler, 0)
+
+
+struct decode_emulate {
+	struct decode_header	header;
+	union decode_item	handler;
+};
+
+#define DECODE_EMULATEX(_mask, _value, _handler, _regs)			\
+	DECODE_HEADER(DECODE_TYPE_EMULATE, _mask, _value, _regs),	\
+	{.handler = (_handler)}
+
+#define DECODE_EMULATE(_mask, _value, _handler)		\
+	DECODE_EMULATEX(_mask, _value, _handler, 0)
+
+
+struct decode_or {
+	struct decode_header	header;
+};
+
+#define DECODE_OR(_mask, _value)				\
+	DECODE_HEADER(DECODE_TYPE_OR, _mask, _value, 0)
+
+
+struct decode_reject {
+	struct decode_header	header;
+};
+
+#define DECODE_REJECT(_mask, _value)				\
+	DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0)
+
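+/*
+ * Illustrative sketch (not from the original source): a minimal decode
+ * table built from the macros above, reusing a handler defined elsewhere
+ * in this patch. An instruction matching no entry before DECODE_END is
+ * rejected.
+ *
+ *	static const union decode_item example_table[] = {
+ *		DECODE_SIMULATE	(0xffff, 0xbf00, kprobe_simulate_nop),
+ *		DECODE_END
+ *	};
+ */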
+
+#ifdef CONFIG_THUMB2_KERNEL
+extern const union decode_item kprobe_decode_thumb16_table[];
+extern const union decode_item kprobe_decode_thumb32_table[];
+#else
+extern const union decode_item kprobe_decode_arm_table[];
+#endif
+
+
+int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+			const union decode_item *table, bool thumb16);
+
+
+#endif /* _ARM_KERNEL_KPROBES_H */
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/leds.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/leds.c
new file mode 100644
index 0000000..2050399
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/leds.c
@@ -0,0 +1,145 @@
+/*
+ * LED support code, ripped out of arch/arm/kernel/time.c
+ *
+ *  Copyright (C) 1994-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/syscore_ops.h>
+#include <linux/string.h>
+
+#include <asm/leds.h>
+
+static void dummy_leds_event(led_event_t evt)
+{
+}
+
+void (*leds_event)(led_event_t) = dummy_leds_event;
+
+struct leds_evt_name {
+	const char	name[8];
+	int		on;
+	int		off;
+};
+
+static const struct leds_evt_name evt_names[] = {
+	{ "amber", led_amber_on, led_amber_off },
+	{ "blue",  led_blue_on,  led_blue_off  },
+	{ "green", led_green_on, led_green_off },
+	{ "red",   led_red_on,   led_red_off   },
+};
+
+static ssize_t leds_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	int ret = -EINVAL, len = strcspn(buf, " ");
+
+	if (len > 0 && buf[len] == '\0')
+		len--;
+
+	if (strncmp(buf, "claim", len) == 0) {
+		leds_event(led_claim);
+		ret = size;
+	} else if (strncmp(buf, "release", len) == 0) {
+		leds_event(led_release);
+		ret = size;
+	} else {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
+			if (strlen(evt_names[i].name) != len ||
+			    strncmp(buf, evt_names[i].name, len) != 0)
+				continue;
+			if (strncmp(buf+len, " on", 3) == 0) {
+				leds_event(evt_names[i].on);
+				ret = size;
+			} else if (strncmp(buf+len, " off", 4) == 0) {
+				leds_event(evt_names[i].off);
+				ret = size;
+			}
+			break;
+		}
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(event, 0200, NULL, leds_store);
+
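+/*
+ * Illustrative usage from userspace (the sysfs path is assumed from the
+ * subsystem/device names registered below):
+ *
+ *	echo claim > /sys/devices/system/leds/leds0/event
+ *	echo "green on" > /sys/devices/system/leds/leds0/event
+ */
+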
+static struct bus_type leds_subsys = {
+	.name		= "leds",
+	.dev_name	= "leds",
+};
+
+static struct device leds_device = {
+	.id		= 0,
+	.bus		= &leds_subsys,
+};
+
+static int leds_suspend(void)
+{
+	leds_event(led_stop);
+	return 0;
+}
+
+static void leds_resume(void)
+{
+	leds_event(led_start);
+}
+
+static void leds_shutdown(void)
+{
+	leds_event(led_halted);
+}
+
+static struct syscore_ops leds_syscore_ops = {
+	.shutdown	= leds_shutdown,
+	.suspend	= leds_suspend,
+	.resume		= leds_resume,
+};
+
+static int leds_idle_notifier(struct notifier_block *nb, unsigned long val,
+                                void *data)
+{
+	switch (val) {
+	case IDLE_START:
+		leds_event(led_idle_start);
+		break;
+	case IDLE_END:
+		leds_event(led_idle_end);
+		break;
+	}
+
+	return 0;
+}
+
+static struct notifier_block leds_idle_nb = {
+	.notifier_call = leds_idle_notifier,
+};
+
+static int __init leds_init(void)
+{
+	int ret;
+	ret = subsys_system_register(&leds_subsys, NULL);
+	if (ret == 0)
+		ret = device_register(&leds_device);
+	if (ret == 0)
+		ret = device_create_file(&leds_device, &dev_attr_event);
+	if (ret == 0) {
+		register_syscore_ops(&leds_syscore_ops);
+		idle_notifier_register(&leds_idle_nb);
+	}
+
+	return ret;
+}
+
+device_initcall(leds_init);
+
+EXPORT_SYMBOL(leds_event);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/machine_kexec.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/machine_kexec.c
new file mode 100644
index 0000000..5f104b8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/machine_kexec.c
@@ -0,0 +1,147 @@
+/*
+ * machine_kexec.c - handle transition of Linux booting another kernel
+ */
+
+#include <linux/mm.h>
+#include <linux/kexec.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/mach-types.h>
+#include <asm/system_misc.h>
+
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned int relocate_new_kernel_size;
+
+extern unsigned long kexec_start_address;
+extern unsigned long kexec_indirection_page;
+extern unsigned long kexec_mach_type;
+extern unsigned long kexec_boot_atags;
+
+static atomic_t waiting_for_crash_ipi;
+
+/*
+ * Provide a dummy crash_notes definition until crash dump support arrives for ARM.
+ * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
+ */
+
+int machine_kexec_prepare(struct kimage *image)
+{
+	return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+}
+
+void machine_crash_nonpanic_core(void *unused)
+{
+	struct pt_regs regs;
+
+	crash_setup_regs(&regs, NULL);
+	printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
+	       smp_processor_id());
+	crash_save_cpu(&regs, smp_processor_id());
+	flush_cache_all();
+
+	atomic_dec(&waiting_for_crash_ipi);
+	while (1)
+		cpu_relax();
+}
+
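+/*
+ * Quiesce the interrupt controller before jumping to the new kernel:
+ * complete any in-progress interrupt, then mask and disable every IRQ.
+ */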
+static void machine_kexec_mask_interrupts(void)
+{
+	unsigned int i;
+	struct irq_desc *desc;
+
+	for_each_irq_desc(i, desc) {
+		struct irq_chip *chip;
+
+		chip = irq_desc_get_chip(desc);
+		if (!chip)
+			continue;
+
+		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+			chip->irq_eoi(&desc->irq_data);
+
+		if (chip->irq_mask)
+			chip->irq_mask(&desc->irq_data);
+
+		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+			chip->irq_disable(&desc->irq_data);
+	}
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	unsigned long msecs;
+
+	local_irq_disable();
+
+	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+	smp_call_function(machine_crash_nonpanic_core, NULL, false);
+	msecs = 1000; /* Wait at most a second for the other cpus to stop */
+	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+		mdelay(1);
+		msecs--;
+	}
+	if (atomic_read(&waiting_for_crash_ipi) > 0)
+		printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n");
+
+	crash_save_cpu(regs, smp_processor_id());
+	machine_kexec_mask_interrupts();
+
+	printk(KERN_INFO "Loading crashdump kernel...\n");
+}
+
+/*
+ * Function pointer to optional machine-specific reinitialization
+ */
+void (*kexec_reinit)(void);
+
+void machine_kexec(struct kimage *image)
+{
+	unsigned long page_list;
+	unsigned long reboot_code_buffer_phys;
+	void *reboot_code_buffer;
+
+
+	page_list = image->head & PAGE_MASK;
+
+	/* we need both effective and real address here */
+	reboot_code_buffer_phys =
+	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+	reboot_code_buffer = page_address(image->control_code_page);
+
+	/* Prepare parameters for reboot_code_buffer */
+	kexec_start_address = image->start;
+	kexec_indirection_page = page_list;
+	kexec_mach_type = machine_arch_type;
+	kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
+
+	/* copy our kernel relocation code to the control code page */
+	memcpy(reboot_code_buffer,
+	       relocate_new_kernel, relocate_new_kernel_size);
+
+
+	flush_icache_range((unsigned long) reboot_code_buffer,
+			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
+	printk(KERN_INFO "Bye!\n");
+
+	if (kexec_reinit)
+		kexec_reinit();
+
+	soft_restart(reboot_code_buffer_phys);
+}
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_ARM_LPAE
+	VMCOREINFO_CONFIG(ARM_LPAE);
+#endif
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/module.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/module.c
new file mode 100644
index 0000000..63a1ba6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/module.c
@@ -0,0 +1,349 @@
+/*
+ *  linux/arch/arm/kernel/module.c
+ *
+ *  Copyright (C) 2002 Russell King.
+ *  Modified for nommu by Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Module allocation method suggested by Andi Kleen.
+ */
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/smp_plat.h>
+#include <asm/unwind.h>
+
+#ifdef CONFIG_XIP_KERNEL
+/*
+ * The XIP kernel text is mapped in the module area for modules and
+ * some other stuff to work without any indirect relocations.
+ * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
+ * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
+ */
+#undef MODULES_VADDR
+#define MODULES_VADDR	(((unsigned long)_etext + ~PMD_MASK) & PMD_MASK)
+#endif
+
+#ifdef CONFIG_MMU
+void *module_alloc(unsigned long size)
+{
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				__builtin_return_address(0));
+}
+#endif
+
+int
+apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
+	       unsigned int relindex, struct module *module)
+{
+	Elf32_Shdr *symsec = sechdrs + symindex;
+	Elf32_Shdr *relsec = sechdrs + relindex;
+	Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
+	Elf32_Rel *rel = (void *)relsec->sh_addr;
+	unsigned int i;
+
+	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
+		unsigned long loc;
+		Elf32_Sym *sym;
+		const char *symname;
+		s32 offset;
+#if defined(CONFIG_THUMB2_KERNEL) || defined(USE_CPPS_KO)
+		u32 upper, lower, sign, j1, j2;
+#endif
+
+		offset = ELF32_R_SYM(rel->r_info);
+		if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
+			pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
+				module->name, relindex, i);
+			return -ENOEXEC;
+		}
+
+		sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
+		symname = strtab + sym->st_name;
+
+		if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
+			pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
+			       module->name, relindex, i, symname,
+			       rel->r_offset, dstsec->sh_size);
+			return -ENOEXEC;
+		}
+
+		loc = dstsec->sh_addr + rel->r_offset;
+
+		switch (ELF32_R_TYPE(rel->r_info)) {
+		case R_ARM_NONE:
+			/* ignore */
+			break;
+
+		case R_ARM_ABS32:
+		case R_ARM_TARGET1:
+			*(u32 *)loc += sym->st_value;
+			break;
+
+		case R_ARM_REL32:
+			*(u32 *)loc += sym->st_value - loc;
+			break;
+
+		case R_ARM_PC24:
+		case R_ARM_CALL:
+		case R_ARM_JUMP24:
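+			/*
+			 * The low 24 bits hold a signed word offset
+			 * (offset >> 2). Reconstruct and sign-extend it,
+			 * add the distance from the relocation site to
+			 * the symbol, then range-check the +/-32MB reach.
+			 */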
+			offset = (*(u32 *)loc & 0x00ffffff) << 2;
+			if (offset & 0x02000000)
+				offset -= 0x04000000;
+
+			offset += sym->st_value - loc;
+			if (offset & 3 ||
+			    offset <= (s32)0xfe000000 ||
+			    offset >= (s32)0x02000000) {
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
+				return -ENOEXEC;
+			}
+
+			offset >>= 2;
+
+			*(u32 *)loc &= 0xff000000;
+			*(u32 *)loc |= offset & 0x00ffffff;
+			break;
+
+		case R_ARM_V4BX:
+			/* Preserve Rm and the condition code. Alter
+			 * other bits to re-code instruction as
+			 * MOV PC,Rm.
+			 */
+			*(u32 *)loc &= 0xf000000f;
+			*(u32 *)loc |= 0x01a0f000;
+			break;
+
+		case R_ARM_PREL31:
+			offset = *(u32 *)loc + sym->st_value - loc;
+			*(u32 *)loc = offset & 0x7fffffff;
+			break;
+
+		case R_ARM_MOVW_ABS_NC:
+		case R_ARM_MOVT_ABS:
+			offset = *(u32 *)loc;
+			offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
+			offset = (offset ^ 0x8000) - 0x8000;
+
+			offset += sym->st_value;
+			if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
+				offset >>= 16;
+
+			*(u32 *)loc &= 0xfff0f000;
+			*(u32 *)loc |= ((offset & 0xf000) << 4) |
+					(offset & 0x0fff);
+			break;
+
+#if defined(CONFIG_THUMB2_KERNEL) || defined(USE_CPPS_KO)
+		case R_ARM_THM_CALL:
+		case R_ARM_THM_JUMP24:
+			upper = *(u16 *)loc;
+			lower = *(u16 *)(loc + 2);
+
+			/*
+			 * 25 bit signed address range (Thumb-2 BL and B.W
+			 * instructions):
+			 *   S:I1:I2:imm10:imm11:0
+			 * where:
+			 *   S     = upper[10]   = offset[24]
+			 *   I1    = ~(J1 ^ S)   = offset[23]
+			 *   I2    = ~(J2 ^ S)   = offset[22]
+			 *   imm10 = upper[9:0]  = offset[21:12]
+			 *   imm11 = lower[10:0] = offset[11:1]
+			 *   J1    = lower[13]
+			 *   J2    = lower[11]
+			 */
+			sign = (upper >> 10) & 1;
+			j1 = (lower >> 13) & 1;
+			j2 = (lower >> 11) & 1;
+			offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
+				((~(j2 ^ sign) & 1) << 22) |
+				((upper & 0x03ff) << 12) |
+				((lower & 0x07ff) << 1);
+			if (offset & 0x01000000)
+				offset -= 0x02000000;
+			offset += sym->st_value - loc;
+
+			/*
+			 * For function symbols, only Thumb addresses are
+			 * allowed (no interworking).
+			 *
+			 * For non-function symbols, the destination
+			 * has no specific ARM/Thumb disposition, so
+			 * the branch is resolved under the assumption
+			 * that interworking is not required.
+			 */
+			if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
+				!(offset & 1)) ||
+			    offset <= (s32)0xff000000 ||
+			    offset >= (s32)0x01000000) {
+				pr_err("%s: section %u reloc %u sym '%s': Thumb-2 relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
+				return -ENOEXEC;
+			}
+
+			sign = (offset >> 24) & 1;
+			j1 = sign ^ (~(offset >> 23) & 1);
+			j2 = sign ^ (~(offset >> 22) & 1);
+			*(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+					    ((offset >> 12) & 0x03ff));
+			*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
+						  (j1 << 13) | (j2 << 11) |
+						  ((offset >> 1) & 0x07ff));
+			break;
+
+		case R_ARM_THM_MOVW_ABS_NC:
+		case R_ARM_THM_MOVT_ABS:
+			upper = *(u16 *)loc;
+			lower = *(u16 *)(loc + 2);
+
+			/*
+			 * MOVT/MOVW instructions encoding in Thumb-2:
+			 *
+			 * i	= upper[10]
+			 * imm4	= upper[3:0]
+			 * imm3	= lower[14:12]
+			 * imm8	= lower[7:0]
+			 *
+			 * imm16 = imm4:i:imm3:imm8
+			 */
+			offset = ((upper & 0x000f) << 12) |
+				((upper & 0x0400) << 1) |
+				((lower & 0x7000) >> 4) | (lower & 0x00ff);
+			offset = (offset ^ 0x8000) - 0x8000;
+			offset += sym->st_value;
+
+			if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
+				offset >>= 16;
+
+			*(u16 *)loc = (u16)((upper & 0xfbf0) |
+					    ((offset & 0xf000) >> 12) |
+					    ((offset & 0x0800) >> 1));
+			*(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
+						  ((offset & 0x0700) << 4) |
+						  (offset & 0x00ff));
+			break;
+#endif
+
+		default:
+			printk(KERN_ERR "%s: unknown relocation: %u\n",
+			       module->name, ELF32_R_TYPE(rel->r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
+
+struct mod_unwind_map {
+	const Elf_Shdr *unw_sec;
+	const Elf_Shdr *txt_sec;
+};
+
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_pv_table(const void *, unsigned long);
+extern void fixup_smp(const void *, unsigned long);
+
+int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+		    struct module *mod)
+{
+	const Elf_Shdr *s = NULL;
+#ifdef CONFIG_ARM_UNWIND
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
+	struct mod_unwind_map maps[ARM_SEC_MAX];
+	int i;
+
+	memset(maps, 0, sizeof(maps));
+
+	for (s = sechdrs; s < sechdrs_end; s++) {
+		const char *secname = secstrs + s->sh_name;
+
+		if (!(s->sh_flags & SHF_ALLOC))
+			continue;
+
+		if (strcmp(".ARM.exidx.init.text", secname) == 0)
+			maps[ARM_SEC_INIT].unw_sec = s;
+		else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
+			maps[ARM_SEC_DEVINIT].unw_sec = s;
+		else if (strcmp(".ARM.exidx", secname) == 0)
+			maps[ARM_SEC_CORE].unw_sec = s;
+		else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
+			maps[ARM_SEC_EXIT].unw_sec = s;
+		else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
+			maps[ARM_SEC_DEVEXIT].unw_sec = s;
+		else if (strcmp(".init.text", secname) == 0)
+			maps[ARM_SEC_INIT].txt_sec = s;
+		else if (strcmp(".devinit.text", secname) == 0)
+			maps[ARM_SEC_DEVINIT].txt_sec = s;
+		else if (strcmp(".text", secname) == 0)
+			maps[ARM_SEC_CORE].txt_sec = s;
+		else if (strcmp(".exit.text", secname) == 0)
+			maps[ARM_SEC_EXIT].txt_sec = s;
+		else if (strcmp(".devexit.text", secname) == 0)
+			maps[ARM_SEC_DEVEXIT].txt_sec = s;
+	}
+
+	for (i = 0; i < ARM_SEC_MAX; i++)
+		if (maps[i].unw_sec && maps[i].txt_sec)
+			mod->arch.unwind[i] =
+				unwind_table_add(maps[i].unw_sec->sh_addr,
+					         maps[i].unw_sec->sh_size,
+					         maps[i].txt_sec->sh_addr,
+					         maps[i].txt_sec->sh_size);
+#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+	s = find_mod_section(hdr, sechdrs, ".pv_table");
+	if (s)
+		fixup_pv_table((void *)s->sh_addr, s->sh_size);
+#endif
+	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+	if (s && !is_smp())
+#ifdef CONFIG_SMP_ON_UP
+		fixup_smp((void *)s->sh_addr, s->sh_size);
+#else
+		return -EINVAL;
+#endif
+	return 0;
+}
+
+void
+module_arch_cleanup(struct module *mod)
+{
+#ifdef CONFIG_ARM_UNWIND
+	int i;
+
+	for (i = 0; i < ARM_SEC_MAX; i++)
+		if (mod->arch.unwind[i])
+			unwind_table_del(mod->arch.unwind[i]);
+#endif
+}
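A minimal standalone sketch of the branch-relocation arithmetic used by the
R_ARM_PC24/R_ARM_CALL/R_ARM_JUMP24 cases in apply_relocate() above (plain ISO C;
the helper names are hypothetical and not part of this patch): the imm24 field
is shifted up to a byte offset, sign-extended from bit 25, adjusted by the
symbol delta, and masked back into the instruction word.

#include <stdint.h>

/* Decode the signed byte offset held in an ARM B/BL instruction word. */
static int32_t decode_branch_offset(uint32_t insn)
{
	int32_t offset = (insn & 0x00ffffff) << 2;	/* imm24 -> byte offset */

	if (offset & 0x02000000)			/* bit 25 carries the sign */
		offset -= 0x04000000;			/* sign-extend to 32 bits */
	return offset;
}

/* Re-encode a word-aligned, in-range byte offset into the instruction. */
static uint32_t encode_branch_offset(uint32_t insn, int32_t offset)
{
	return (insn & 0xff000000) | (((uint32_t)offset >> 2) & 0x00ffffff);
}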
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/myarmksyms.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/myarmksyms.c
new file mode 100644
index 0000000..91dc1bb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/myarmksyms.c
@@ -0,0 +1,33 @@
+/*
+ *  linux/arch/arm/kernel/myarmksyms.c
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/cryptohash.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/checksum.h>
+#include <asm/ftrace.h>
+
+#ifdef CONFIG_MMU
+
+EXPORT_SYMBOL(__myget_user_1);
+EXPORT_SYMBOL(__myget_user_2);
+EXPORT_SYMBOL(__myget_user_4);
+
+EXPORT_SYMBOL(__myput_user_1);
+EXPORT_SYMBOL(__myput_user_2);
+EXPORT_SYMBOL(__myput_user_4);
+EXPORT_SYMBOL(__myput_user_8);
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/opcodes.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/opcodes.c
new file mode 100644
index 0000000..f8179c6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/opcodes.c
@@ -0,0 +1,72 @@
+/*
+ *  linux/arch/arm/kernel/opcodes.c
+ *
+ *  A32 condition code lookup feature moved from nwfpe/fpopcode.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/opcodes.h>
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+/*
+ * Condition code lookup table.
+ * The index into the table is the instruction's condition field:
+ * EQ, NE, ... LT, GT, AL, NV.
+ * The bit position within each 16-bit entry is the current NZCV flag value.
+ */
+static const unsigned short cc_map[16] = {
+	0xF0F0,			/* EQ == Z set            */
+	0x0F0F,			/* NE                     */
+	0xCCCC,			/* CS == C set            */
+	0x3333,			/* CC                     */
+	0xFF00,			/* MI == N set            */
+	0x00FF,			/* PL                     */
+	0xAAAA,			/* VS == V set            */
+	0x5555,			/* VC                     */
+	0x0C0C,			/* HI == C set && Z clear */
+	0xF3F3,			/* LS == C clear || Z set */
+	0xAA55,			/* GE == (N==V)           */
+	0x55AA,			/* LT == (N!=V)           */
+	0x0A05,			/* GT == (!Z && (N==V))   */
+	0xF5FA,			/* LE == (Z || (N!=V))    */
+	0xFFFF,			/* AL always              */
+	0			/* NV                     */
+};
+
+/*
+ * Returns:
+ * ARM_OPCODE_CONDTEST_FAIL   - if condition fails
+ * ARM_OPCODE_CONDTEST_PASS   - if condition passes (including AL)
+ * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
+ *                              opcode space from v5 onwards
+ *
+ * Code that tests whether a conditional instruction would pass its condition
+ * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
+ *
+ * Code that tests if a condition means that the instruction would be executed
+ * (regardless of conditional or unconditional) should instead check that the
+ * return value != ARM_OPCODE_CONDTEST_FAIL.
+ */
+asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr)
+{
+	u32 cc_bits  = opcode >> 28;
+	u32 psr_cond = psr >> 28;
+	unsigned int ret;
+
+	if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+		if ((cc_map[cc_bits] >> (psr_cond)) & 1)
+			ret = ARM_OPCODE_CONDTEST_PASS;
+		else
+			ret = ARM_OPCODE_CONDTEST_FAIL;
+	} else {
+		ret = ARM_OPCODE_CONDTEST_UNCOND;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arm_check_condition);
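A worked example, with hypothetical values, of the cc_map lookup performed by
arm_check_condition() above. A GE-conditioned instruction has condition field
0xA; cc_map[0xA] is 0xAA55, whose set bits are exactly the NZCV combinations
with N == V. With PSR flags N=1, Z=0, C=0, V=1 the flag nibble is 0x9, and bit
9 of 0xAA55 is set, so the condition test passes:

#include <stdio.h>

int main(void)
{
	unsigned int opcode = 0xA0000000;	/* condition field = 0b1010 (GE) */
	unsigned int psr    = 0x90000000;	/* N=1, Z=0, C=0, V=1 */
	unsigned int cc     = opcode >> 28;	/* 0xA */
	unsigned int flags  = psr >> 28;	/* 0x9 */

	/* cc_map[0xA] == 0xAA55; bit 9 is set, so GE passes because N == V. */
	printf("cond %#x (GE) passes: %u\n", cc, (0xAA55u >> flags) & 1);
	return 0;
}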
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/patch.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/patch.c
new file mode 100644
index 0000000..07314af
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/patch.c
@@ -0,0 +1,75 @@
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/stop_machine.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include <asm/opcodes.h>
+
+#include "patch.h"
+
+struct patch {
+	void *addr;
+	unsigned int insn;
+};
+
+void __kprobes __patch_text(void *addr, unsigned int insn)
+{
+	bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
+	int size;
+
+	if (thumb2 && __opcode_is_thumb16(insn)) {
+		*(u16 *)addr = __opcode_to_mem_thumb16(insn);
+		size = sizeof(u16);
+	} else if (thumb2 && ((uintptr_t)addr & 2)) {
+		u16 first = __opcode_thumb32_first(insn);
+		u16 second = __opcode_thumb32_second(insn);
+		u16 *addrh = addr;
+
+		addrh[0] = __opcode_to_mem_thumb16(first);
+		addrh[1] = __opcode_to_mem_thumb16(second);
+
+		size = sizeof(u32);
+	} else {
+		if (thumb2)
+			insn = __opcode_to_mem_thumb32(insn);
+		else
+			insn = __opcode_to_mem_arm(insn);
+
+		*(u32 *)addr = insn;
+		size = sizeof(u32);
+	}
+
+	flush_icache_range((uintptr_t)(addr),
+			   (uintptr_t)(addr) + size);
+}
+
+static int __kprobes patch_text_stop_machine(void *data)
+{
+	struct patch *patch = data;
+
+	__patch_text(patch->addr, patch->insn);
+
+	return 0;
+}
+
+void __kprobes patch_text(void *addr, unsigned int insn)
+{
+	struct patch patch = {
+		.addr = addr,
+		.insn = insn,
+	};
+
+	if (cache_ops_need_broadcast()) {
+		stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
+	} else {
+		bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
+				      && __opcode_is_thumb32(insn)
+				      && ((uintptr_t)addr & 2);
+
+		if (straddles_word)
+			stop_machine(patch_text_stop_machine, &patch, NULL);
+		else
+			__patch_text(addr, insn);
+	}
+}
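A hedged usage sketch for the interface above. Assuming a kernel context with a
valid patch site, replacing one ARM instruction with the ARMv6K+ NOP hint
encoding would look like the hypothetical helper below; patch_text() itself
decides whether plain in-place patching is safe or whether stop_machine() is
required (cache maintenance must be broadcast, or a Thumb-2 instruction
straddles a word boundary).

#include "patch.h"

/* Hypothetical example: overwrite one patchable ARM instruction with a NOP. */
static void example_patch_nop(void *site)
{
	patch_text(site, 0xe320f000);	/* ARM NOP hint encoding (ARMv6K+) */
}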
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/patch.h b/ap/os/linux/linux-3.4.x/arch/arm/kernel/patch.h
new file mode 100644
index 0000000..b4731f2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/patch.h
@@ -0,0 +1,7 @@
+#ifndef _ARM_KERNEL_PATCH_H
+#define _ARM_KERNEL_PATCH_H
+
+void patch_text(void *addr, unsigned int insn);
+void __patch_text(void *addr, unsigned int insn);
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event.c
new file mode 100644
index 0000000..b7a4871
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event.c
@@ -0,0 +1,867 @@
+#undef DEBUG
+
+/*
+ * ARM performance counter support.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based on the sparc64 perf event code, which is in turn based
+ * on the x86 code. Callchain code is based on the ARM OProfile backtrace
+ * code.
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/cputype.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+#include <asm/stacktrace.h>
+
+/*
+ * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
+ * another platform that supports more, we need to increase this to be the
+ * largest of all platforms.
+ *
+ * ARMv7 supports up to 32 events:
+ *  cycle counter CCNT + 31 event counters CNT0..30.
+ *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
+ */
+#define ARMPMU_MAX_HWEVENTS		32
+
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *cpu_pmu;
+
+enum arm_perf_pmu_ids
+armpmu_get_pmu_id(void)
+{
+	int id = -ENODEV;
+
+	if (cpu_pmu != NULL)
+		id = cpu_pmu->id;
+
+	return id;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
+
+int perf_num_counters(void)
+{
+	int max_events = 0;
+
+	if (cpu_pmu != NULL)
+		max_events = cpu_pmu->num_events;
+
+	return max_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+#define HW_OP_UNSUPPORTED		0xFFFF
+
+#define C(_x) \
+	PERF_COUNT_HW_CACHE_##_x
+
+#define CACHE_OP_UNSUPPORTED		0xFFFF
+
+static int
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result, ret;
+
+	cache_type = (config >>  0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return -EINVAL;
+
+	cache_op = (config >>  8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return -EINVAL;
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return -EINVAL;
+
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
+
+	if (ret == CACHE_OP_UNSUPPORTED)
+		return -ENOENT;
+
+	return ret;
+}
+
+static int
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+{
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -ENOENT;
+
+	mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+}
+
+static int
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
+{
+	return (int)(config & raw_event_mask);
+}
+
+static int map_cpu_event(struct perf_event *event,
+			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+			 const unsigned (*cache_map)
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX],
+			 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
+}
+
+int
+armpmu_event_set_period(struct perf_event *event,
+			struct hw_perf_event *hwc,
+			int idx)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	s64 left = local64_read(&hwc->period_left);
+	s64 period = hwc->sample_period;
+	int ret = 0;
+
+	if (unlikely(left <= -period)) {
+		left = period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (unlikely(left <= 0)) {
+		left += period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (left > (s64)armpmu->max_period)
+		left = armpmu->max_period;
+
+	local64_set(&hwc->prev_count, (u64)-left);
+
+	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
+
+	perf_event_update_userpage(event);
+
+	return ret;
+}
+
+u64
+armpmu_event_update(struct perf_event *event,
+		    struct hw_perf_event *hwc,
+		    int idx)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	u64 delta, prev_raw_count, new_raw_count;
+
+again:
+	prev_raw_count = local64_read(&hwc->prev_count);
+	new_raw_count = armpmu->read_counter(idx);
+
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+			     new_raw_count) != prev_raw_count)
+		goto again;
+
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
+
+	return new_raw_count;
+}
+
+static void
+armpmu_read(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	/* Don't read disabled counters! */
+	if (hwc->idx < 0)
+		return;
+
+	armpmu_event_update(event, hwc, hwc->idx);
+}
+
+static void
+armpmu_stop(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * ARM pmu always has to update the counter, so ignore
+	 * PERF_EF_UPDATE, see comments in armpmu_start().
+	 */
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		armpmu->disable(hwc, hwc->idx);
+		barrier(); /* why? */
+		armpmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static void
+armpmu_start(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * ARM pmu always has to reprogram the period, so ignore
+	 * PERF_EF_RELOAD, see the comment below.
+	 */
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+	/*
+	 * Set the period again. Some counters can't be stopped, so when we
+	 * were stopped we simply disabled the IRQ source and the counter
+	 * may have been left counting. If we don't do this step then we may
+	 * get an interrupt too soon or *way* too late if the overflow has
+	 * happened since disabling.
+	 */
+	armpmu_event_set_period(event, hwc, hwc->idx);
+	armpmu->enable(hwc, hwc->idx);
+}
+
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	WARN_ON(idx < 0);
+
+	armpmu_stop(event, PERF_EF_UPDATE);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
+
+	perf_event_update_userpage(event);
+}
+
+static int
+armpmu_add(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
+
+	perf_pmu_disable(event->pmu);
+
+	/* If we don't have space for the counter then finish early. */
+	idx = armpmu->get_event_idx(hw_events, hwc);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
+
+	/*
+	 * If there is an event in the counter we are going to use then make
+	 * sure it is disabled.
+	 */
+	event->hw.idx = idx;
+	armpmu->disable(hwc, idx);
+	hw_events->events[idx] = event;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		armpmu_start(event, PERF_EF_RELOAD);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
+}
+
+static int
+validate_event(struct pmu_hw_events *hw_events,
+	       struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event fake_event = event->hw;
+	struct pmu *leader_pmu = event->group_leader->pmu;
+
+	if (is_software_event(event))
+		return 1;
+
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+		return 1;
+
+	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct pmu_hw_events fake_pmu;
+	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
+
+	/*
+	 * Initialise the fake PMU. We only need to populate the
+	 * used_mask for the purposes of validation.
+	 */
+	memset(fake_used_mask, 0, sizeof(fake_used_mask));
+	fake_pmu.used_mask = fake_used_mask;
+
+	if (!validate_event(&fake_pmu, leader))
+		return -EINVAL;
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!validate_event(&fake_pmu, sibling))
+			return -EINVAL;
+	}
+
+	if (!validate_event(&fake_pmu, event))
+		return -EINVAL;
+
+	return 0;
+}
+
+static irqreturn_t armpmu_platform_irq(int irq, void *dev)
+{
+	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
+	struct platform_device *plat_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
+
+	return plat->handle_irq(irq, dev, armpmu->handle_irq);
+}
+
+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat =
+		dev_get_platdata(&pmu_device->dev);
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	for (i = 0; i < irqs; ++i) {
+		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+			continue;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0) {
+			if (plat && plat->disable_irq)
+				plat->disable_irq(irq);
+			free_irq(irq, armpmu);
+		}
+	}
+
+	release_pmu(armpmu->type);
+}
+
+static int
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
+{
+	struct arm_pmu_platdata *plat;
+	irq_handler_t handle_irq;
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	err = reserve_pmu(armpmu->type);
+	if (err) {
+		pr_warning("unable to reserve pmu\n");
+		return err;
+	}
+
+	plat = dev_get_platdata(&pmu_device->dev);
+	if (plat && plat->handle_irq)
+		handle_irq = armpmu_platform_irq;
+	else
+		handle_irq = armpmu->handle_irq;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_err("no irqs for PMUs defined\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < irqs; ++i) {
+		err = 0;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq < 0)
+			continue;
+
+		/*
+		 * If we have a single PMU interrupt that we can't shift,
+		 * assume that we're running on a uniprocessor machine and
+		 * continue. Otherwise, continue without this interrupt.
+		 */
+		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				    irq, i);
+			continue;
+		}
+
+		err = request_irq(irq, handle_irq,
+				  IRQF_NOBALANCING | IRQF_NO_THREAD,
+				  "arm-pmu", armpmu);
+		if (err) {
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				irq);
+			armpmu_release_hardware(armpmu);
+			return err;
+		} else if (plat && plat->enable_irq)
+			plat->enable_irq(irq);
+
+		cpumask_set_cpu(i, &armpmu->active_irqs);
+	}
+
+	return 0;
+}
+
+static void
+hw_perf_event_destroy(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	atomic_t *active_events	 = &armpmu->active_events;
+	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+		armpmu_release_hardware(armpmu);
+		mutex_unlock(pmu_reserve_mutex);
+	}
+}
+
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+	return attr->exclude_idle || attr->exclude_user ||
+	       attr->exclude_kernel || attr->exclude_hv;
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int mapping, err;
+
+	mapping = armpmu->map_event(event);
+
+	if (mapping < 0) {
+		pr_debug("event %x:%llx not supported\n", event->attr.type,
+			 event->attr.config);
+		return mapping;
+	}
+
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has its own PMU so we can't do any
+	 * clever allocation or constraints checking at this point.
+	 */
+	hwc->idx		= -1;
+	hwc->config_base	= 0;
+	hwc->config		= 0;
+	hwc->event_base		= 0;
+
+	/*
+	 * Check whether we need to exclude the counter from certain modes.
+	 */
+	if ((!armpmu->set_event_filter ||
+	     armpmu->set_event_filter(hwc, &event->attr)) &&
+	     event_requires_mode_exclusion(&event->attr)) {
+		pr_debug("ARM performance counters do not support "
+			 "mode exclusion\n");
+		return -EPERM;
+	}
+
+	/*
+	 * Store the event encoding into the config_base field.
+	 */
+	hwc->config_base	    |= (unsigned long)mapping;
+
+	if (!hwc->sample_period) {
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
+		hwc->last_period    = hwc->sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	err = 0;
+	if (event->group_leader != event) {
+		err = validate_group(event);
+		if (err)
+			return -EINVAL;
+	}
+
+	return err;
+}
+
+static int armpmu_event_init(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	int err = 0;
+	atomic_t *active_events = &armpmu->active_events;
+
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	if (armpmu->map_event(event) == -ENOENT)
+		return -ENOENT;
+
+	event->destroy = hw_perf_event_destroy;
+
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&armpmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = armpmu_reserve_hardware(armpmu);
+
+		if (!err)
+			atomic_inc(active_events);
+		mutex_unlock(&armpmu->reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static void armpmu_enable(struct pmu *pmu)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+	if (enabled)
+		armpmu->start();
+}
+
+static void armpmu_disable(struct pmu *pmu)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	armpmu->stop();
+}
+
+static void __init armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
+		.pmu_enable	= armpmu_enable,
+		.pmu_disable	= armpmu_disable,
+		.event_init	= armpmu_event_init,
+		.add		= armpmu_add,
+		.del		= armpmu_del,
+		.start		= armpmu_start,
+		.stop		= armpmu_stop,
+		.read		= armpmu_read,
+	};
+}
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+{
+	armpmu_init(armpmu);
+	return perf_pmu_register(&armpmu->pmu, name, type);
+}
+
+/* Include the PMU-specific implementations. */
+#include "perf_event_xscale.c"
+#include "perf_event_v6.c"
+#include "perf_event_v7.c"
+
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+cpu_pmu_reset(void)
+{
+	if (cpu_pmu && cpu_pmu->reset)
+		return on_each_cpu(cpu_pmu->reset, NULL, 1);
+	return 0;
+}
+arch_initcall(cpu_pmu_reset);
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id armpmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a9-pmu"},
+	{.compatible = "arm,cortex-a8-pmu"},
+	{.compatible = "arm,arm1136-pmu"},
+	{.compatible = "arm,arm1176-pmu"},
+	{},
+};
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+	{.name = "arm-pmu"},
+	{},
+};
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+	if (!cpu_pmu)
+		return -ENODEV;
+
+	cpu_pmu->plat_device = pdev;
+	return 0;
+}
+
+static struct platform_driver armpmu_driver = {
+	.driver		= {
+		.name	= "arm-pmu",
+		.of_match_table = armpmu_of_device_ids,
+	},
+	.probe		= armpmu_device_probe,
+	.id_table	= armpmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+	return platform_driver_register(&armpmu_driver);
+}
+device_initcall(register_pmu_driver);
+
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
+{
+	return &__get_cpu_var(cpu_hw_events);
+}
+
+static void __init cpu_pmu_init(struct arm_pmu *armpmu)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		events->events = per_cpu(hw_events, cpu);
+		events->used_mask = per_cpu(used_mask, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+	}
+	armpmu->get_hw_events = armpmu_get_cpu_events;
+	armpmu->type = ARM_PMU_DEVICE_CPU;
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+					unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
+/*
+ * CPU PMU identification and registration.
+ */
+static int __init
+init_hw_perf_events(void)
+{
+	unsigned long cpuid = read_cpuid_id();
+	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
+	unsigned long part_number = (cpuid & 0xFFF0);
+
+	/* ARM Ltd CPUs. */
+	if (0x41 == implementor) {
+		switch (part_number) {
+		case 0xB360:	/* ARM1136 */
+		case 0xB560:	/* ARM1156 */
+		case 0xB760:	/* ARM1176 */
+			cpu_pmu = armv6pmu_init();
+			break;
+		case 0xB020:	/* ARM11mpcore */
+			cpu_pmu = armv6mpcore_pmu_init();
+			break;
+		case 0xC080:	/* Cortex-A8 */
+			cpu_pmu = armv7_a8_pmu_init();
+			break;
+		case 0xC090:	/* Cortex-A9 */
+			cpu_pmu = armv7_a9_pmu_init();
+			break;
+		case 0xC050:	/* Cortex-A5 */
+			cpu_pmu = armv7_a5_pmu_init();
+			break;
+		case 0xC0F0:	/* Cortex-A15 */
+			cpu_pmu = armv7_a15_pmu_init();
+			break;
+		case 0xC070:	/* Cortex-A7 */
+			cpu_pmu = armv7_a7_pmu_init();
+			break;
+		}
+	/* Intel CPUs [xscale]. */
+	} else if (0x69 == implementor) {
+		part_number = (cpuid >> 13) & 0x7;
+		switch (part_number) {
+		case 1:
+			cpu_pmu = xscale1pmu_init();
+			break;
+		case 2:
+			cpu_pmu = xscale2pmu_init();
+			break;
+		}
+	}
+
+	if (cpu_pmu) {
+		pr_info("enabled with %s PMU driver, %d counters available\n",
+			cpu_pmu->name, cpu_pmu->num_events);
+		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
+		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
+	} else {
+		pr_info("no hardware support available\n");
+	}
+
+	return 0;
+}
+early_initcall(init_hw_perf_events);
+
+/*
+ * Callchain handling code.
+ */
+
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct frame_tail {
+	struct frame_tail __user *fp;
+	unsigned long sp;
+	unsigned long lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
+	       struct perf_callchain_entry *entry)
+{
+	struct frame_tail buftail;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+		return NULL;
+
+	perf_callchain_store(entry, buftail.lr);
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail + 1 >= buftail.fp)
+		return NULL;
+
+	return buftail.fp - 1;
+}
+
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+	struct frame_tail __user *tail;
+
+	perf_callchain_store(entry, regs->ARM_pc);
+	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
+
+	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+	       tail && !((unsigned long)tail & 0x3))
+		tail = user_backtrace(tail, entry);
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return, so we use
+ * the PC.
+ */
+static int
+callchain_trace(struct stackframe *fr,
+		void *data)
+{
+	struct perf_callchain_entry *entry = data;
+	perf_callchain_store(entry, fr->pc);
+	return 0;
+}
+
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+	struct stackframe fr;
+
+	fr.fp = regs->ARM_fp;
+	fr.sp = regs->ARM_sp;
+	fr.lr = regs->ARM_lr;
+	fr.pc = regs->ARM_pc;
+	walk_stackframe(&fr, callchain_trace, entry);
+}
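The period bookkeeping in armpmu_event_set_period() and armpmu_event_update()
above relies on two's-complement wraparound: programming the counter to -left
(mod 2^32) makes it overflow after exactly 'left' events, and the masked
subtraction recovers the event count even across the wrap. A standalone sketch
with hypothetical values (not part of this patch):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t max_period = 0xffffffffULL;	/* (1LLU << 32) - 1, as in the v6/v7 PMUs */
	int64_t  left       = 1000;		/* events until the next sample */
	uint32_t prev       = (uint32_t)-left;	/* programmed value: 2^32 - 1000 */
	uint32_t now        = prev + 1000;	/* counter after 1000 events: wraps to 0 */
	uint64_t delta      = (uint64_t)(uint32_t)(now - prev) & max_period;

	assert(now == 0);			/* the overflow interrupt fires here */
	assert(delta == 1000);			/* wrap-safe event count */
	return 0;
}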
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event_v6.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event_v6.c
new file mode 100644
index 0000000..b78af0c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event_v6.c
@@ -0,0 +1,719 @@
+/*
+ * ARMv6 Performance counter handling code.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ *
+ * ARMv6 has 2 configurable performance counters and a single cycle counter.
+ * They all share a single reset bit but can be written to zero so we can use
+ * that for a reset.
+ *
+ * The counters can't be individually enabled or disabled so when we remove
+ * one event and replace it with another we could get spurious counts from the
+ * wrong event. However, we can take advantage of the fact that the
+ * performance counters can export events to the event bus, and the event bus
+ * itself can be monitored. This requires that we *don't* export the events to
+ * the event bus. The procedure for disabling a configurable counter is:
+ *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
+ *	  effectively stops the counter from counting.
+ *	- disable the counter's interrupt generation (each counter has its
+ *	  own interrupt enable bit).
+ * Once stopped, the counter value can be written as 0 to reset.
+ *
+ * To enable a counter:
+ *	- enable the counter's interrupt generation.
+ *	- set the new event type.
+ *
+ * Note: the dedicated cycle counter only counts cycles and can't be
+ * enabled/disabled independently of the others. When we want to disable the
+ * cycle counter, we have to just disable the interrupt reporting and start
+ * ignoring that counter. When re-enabling, we have to reset the value and
+ * enable the interrupt.
+ */
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+enum armv6_perf_types {
+	ARMV6_PERFCTR_ICACHE_MISS	    = 0x0,
+	ARMV6_PERFCTR_IBUF_STALL	    = 0x1,
+	ARMV6_PERFCTR_DDEP_STALL	    = 0x2,
+	ARMV6_PERFCTR_ITLB_MISS		    = 0x3,
+	ARMV6_PERFCTR_DTLB_MISS		    = 0x4,
+	ARMV6_PERFCTR_BR_EXEC		    = 0x5,
+	ARMV6_PERFCTR_BR_MISPREDICT	    = 0x6,
+	ARMV6_PERFCTR_INSTR_EXEC	    = 0x7,
+	ARMV6_PERFCTR_DCACHE_HIT	    = 0x9,
+	ARMV6_PERFCTR_DCACHE_ACCESS	    = 0xA,
+	ARMV6_PERFCTR_DCACHE_MISS	    = 0xB,
+	ARMV6_PERFCTR_DCACHE_WBACK	    = 0xC,
+	ARMV6_PERFCTR_SW_PC_CHANGE	    = 0xD,
+	ARMV6_PERFCTR_MAIN_TLB_MISS	    = 0xF,
+	ARMV6_PERFCTR_EXPL_D_ACCESS	    = 0x10,
+	ARMV6_PERFCTR_LSU_FULL_STALL	    = 0x11,
+	ARMV6_PERFCTR_WBUF_DRAINED	    = 0x12,
+	ARMV6_PERFCTR_CPU_CYCLES	    = 0xFF,
+	ARMV6_PERFCTR_NOP		    = 0x20,
+};
+
+enum armv6_counters {
+	ARMV6_CYCLE_COUNTER = 0,
+	ARMV6_COUNTER0,
+	ARMV6_COUNTER1,
+};
+
+/*
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6_PERFCTR_INSTR_EXEC,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6_PERFCTR_BR_EXEC,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6_PERFCTR_BR_MISPREDICT,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6_PERFCTR_IBUF_STALL,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6_PERFCTR_LSU_FULL_STALL,
+};
+
+static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					  [PERF_COUNT_HW_CACHE_OP_MAX]
+					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		/*
+		 * The ARM performance counters can count micro DTLB misses,
+		 * micro ITLB misses and main TLB misses. There isn't an event
+		 * for TLB misses, so use the micro misses here and if users
+		 * want the main TLB misses they can use a raw counter.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+enum armv6mpcore_perf_types {
+	ARMV6MPCORE_PERFCTR_ICACHE_MISS	    = 0x0,
+	ARMV6MPCORE_PERFCTR_IBUF_STALL	    = 0x1,
+	ARMV6MPCORE_PERFCTR_DDEP_STALL	    = 0x2,
+	ARMV6MPCORE_PERFCTR_ITLB_MISS	    = 0x3,
+	ARMV6MPCORE_PERFCTR_DTLB_MISS	    = 0x4,
+	ARMV6MPCORE_PERFCTR_BR_EXEC	    = 0x5,
+	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
+	ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
+	ARMV6MPCORE_PERFCTR_INSTR_EXEC	    = 0x8,
+	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
+	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
+	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
+	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
+	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
+	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
+	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
+	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
+	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
+	ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
+	ARMV6MPCORE_PERFCTR_CPU_CYCLES	    = 0xFF,
+};
+
+/*
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6MPCORE_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6MPCORE_PERFCTR_INSTR_EXEC,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6MPCORE_PERFCTR_BR_EXEC,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6MPCORE_PERFCTR_IBUF_STALL,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
+};
+
+static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  =
+				ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
+			[C(RESULT_MISS)]    =
+				ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  =
+				ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
+			[C(RESULT_MISS)]    =
+				ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		/*
+		 * The ARM performance counters can count micro DTLB misses,
+		 * micro ITLB misses and main TLB misses. There isn't an event
+		 * for TLB misses, so use the micro misses here and if users
+		 * want the main TLB misses they can use a raw counter.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+static inline unsigned long
+armv6_pmcr_read(void)
+{
+	u32 val;
+	asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
+	return val;
+}
+
+static inline void
+armv6_pmcr_write(unsigned long val)
+{
+	asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
+}
+
+#define ARMV6_PMCR_ENABLE		(1 << 0)
+#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
+#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
+#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
+#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
+#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
+#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
+#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
+#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
+#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
+#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
+#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
+#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
+#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
+
+#define ARMV6_PMCR_OVERFLOWED_MASK \
+	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
+	 ARMV6_PMCR_CCOUNT_OVERFLOW)
+
+static inline int
+armv6_pmcr_has_overflowed(unsigned long pmcr)
+{
+	return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
+}
+
+static inline int
+armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
+				  enum armv6_counters counter)
+{
+	int ret = 0;
+
+	if (ARMV6_CYCLE_COUNTER == counter)
+		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
+	else if (ARMV6_COUNTER0 == counter)
+		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
+	else if (ARMV6_COUNTER1 == counter)
+		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
+	else
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+	return ret;
+}
+
+static inline u32
+armv6pmu_read_counter(int counter)
+{
+	unsigned long value = 0;
+
+	if (ARMV6_CYCLE_COUNTER == counter)
+		asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
+	else if (ARMV6_COUNTER0 == counter)
+		asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
+	else if (ARMV6_COUNTER1 == counter)
+		asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
+	else
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+	return value;
+}
+
+static inline void
+armv6pmu_write_counter(int counter,
+		       u32 value)
+{
+	if (ARMV6_CYCLE_COUNTER == counter)
+		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
+	else if (ARMV6_COUNTER0 == counter)
+		asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
+	else if (ARMV6_COUNTER1 == counter)
+		asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
+	else
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+}
+
+static void
+armv6pmu_enable_event(struct hw_perf_event *hwc,
+		      int idx)
+{
+	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	if (ARMV6_CYCLE_COUNTER == idx) {
+		mask	= 0;
+		evt	= ARMV6_PMCR_CCOUNT_IEN;
+	} else if (ARMV6_COUNTER0 == idx) {
+		mask	= ARMV6_PMCR_EVT_COUNT0_MASK;
+		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
+			  ARMV6_PMCR_COUNT0_IEN;
+	} else if (ARMV6_COUNTER1 == idx) {
+		mask	= ARMV6_PMCR_EVT_COUNT1_MASK;
+		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
+			  ARMV6_PMCR_COUNT1_IEN;
+	} else {
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	/*
+	 * Mask out the current event and set the counter to count the event
+	 * that we're interested in.
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val &= ~mask;
+	val |= evt;
+	armv6_pmcr_write(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static irqreturn_t
+armv6pmu_handle_irq(int irq_num,
+		    void *dev)
+{
+	unsigned long pmcr = armv6_pmcr_read();
+	struct perf_sample_data data;
+	struct pmu_hw_events *cpuc;
+	struct pt_regs *regs;
+	int idx;
+
+	if (!armv6_pmcr_has_overflowed(pmcr))
+		return IRQ_NONE;
+
+	regs = get_irq_regs();
+
+	/*
+	 * The interrupts are cleared by writing the overflow flags back to
+	 * the control register. All of the other bits don't have any effect
+	 * if they are rewritten, so write the whole value back.
+	 */
+	armv6_pmcr_write(pmcr);
+
+	perf_sample_data_init(&data, 0);
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
+		/*
+		 * We have a single interrupt for all counters. Check that
+		 * each counter has overflowed before we process it.
+		 */
+		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			cpu_pmu->disable(hwc, idx);
+	}
+
+	/*
+	 * Handle the pending perf events.
+	 *
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
+	 * will not work.
+	 */
+	irq_work_run();
+
+	return IRQ_HANDLED;
+}
+
+static void
+armv6pmu_start(void)
+{
+	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val |= ARMV6_PMCR_ENABLE;
+	armv6_pmcr_write(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void
+armv6pmu_stop(void)
+{
+	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val &= ~ARMV6_PMCR_ENABLE;
+	armv6_pmcr_write(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
+		       struct hw_perf_event *event)
+{
+	/* Always place a cycle-count event into the cycle counter. */
+	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
+			return -EAGAIN;
+
+		return ARMV6_CYCLE_COUNTER;
+	} else {
+		/*
+		 * For anything other than a cycle counter, try and use
+		 * counter0 and counter1.
+		 */
+		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
+			return ARMV6_COUNTER1;
+
+		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
+			return ARMV6_COUNTER0;
+
+		/* The counters are all in use. */
+		return -EAGAIN;
+	}
+}
+
+static void
+armv6pmu_disable_event(struct hw_perf_event *hwc,
+		       int idx)
+{
+	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	if (ARMV6_CYCLE_COUNTER == idx) {
+		mask	= ARMV6_PMCR_CCOUNT_IEN;
+		evt	= 0;
+	} else if (ARMV6_COUNTER0 == idx) {
+		mask	= ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
+		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
+	} else if (ARMV6_COUNTER1 == idx) {
+		mask	= ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
+		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
+	} else {
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	/*
+	 * Mask out the current event and set the counter to count the number
+	 * of ETM bus signal assertion cycles. The external reporting should
+	 * be disabled and so this should never increment.
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val &= ~mask;
+	val |= evt;
+	armv6_pmcr_write(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void
+armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
+			      int idx)
+{
+	unsigned long val, mask, flags, evt = 0;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	if (ARMV6_CYCLE_COUNTER == idx) {
+		mask	= ARMV6_PMCR_CCOUNT_IEN;
+	} else if (ARMV6_COUNTER0 == idx) {
+		mask	= ARMV6_PMCR_COUNT0_IEN;
+	} else if (ARMV6_COUNTER1 == idx) {
+		mask	= ARMV6_PMCR_COUNT1_IEN;
+	} else {
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	/*
+	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
+	 * simply disable the interrupt reporting.
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val &= ~mask;
+	val |= evt;
+	armv6_pmcr_write(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+				&armv6_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6pmu = {
+	.id			= ARM_PERF_PMU_ID_V6,
+	.name			= "v6",
+	.handle_irq		= armv6pmu_handle_irq,
+	.enable			= armv6pmu_enable_event,
+	.disable		= armv6pmu_disable_event,
+	.read_counter		= armv6pmu_read_counter,
+	.write_counter		= armv6pmu_write_counter,
+	.get_event_idx		= armv6pmu_get_event_idx,
+	.start			= armv6pmu_start,
+	.stop			= armv6pmu_stop,
+	.map_event		= armv6_map_event,
+	.num_events		= 3,
+	.max_period		= (1LLU << 32) - 1,
+};
+
+static struct arm_pmu *__init armv6pmu_init(void)
+{
+	return &armv6pmu;
+}
+
+/*
+ * ARMv6mpcore is almost identical to single core ARMv6 with the exception
+ * that some of the events have different enumerations and that there is no
+ * *hack* to stop the programmable counters. To stop the counters we simply
+ * disable the interrupt reporting and update the event. When unthrottling we
+ * reset the period and enable the interrupt reporting.
+ */
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+				&armv6mpcore_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6mpcore_pmu = {
+	.id			= ARM_PERF_PMU_ID_V6MP,
+	.name			= "v6mpcore",
+	.handle_irq		= armv6pmu_handle_irq,
+	.enable			= armv6pmu_enable_event,
+	.disable		= armv6mpcore_pmu_disable_event,
+	.read_counter		= armv6pmu_read_counter,
+	.write_counter		= armv6pmu_write_counter,
+	.get_event_idx		= armv6pmu_get_event_idx,
+	.start			= armv6pmu_start,
+	.stop			= armv6pmu_stop,
+	.map_event		= armv6mpcore_map_event,
+	.num_events		= 3,
+	.max_period		= (1LLU << 32) - 1,
+};
+
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
+{
+	return &armv6mpcore_pmu;
+}
+#else
+static struct arm_pmu *__init armv6pmu_init(void)
+{
+	return NULL;
+}
+
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
+{
+	return NULL;
+}
+#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
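The cache maps above are indexed via armpmu_map_cache_event() in perf_event.c,
which unpacks a PERF_TYPE_HW_CACHE config value as type | (op << 8) |
(result << 16). A small illustration with hypothetical values, an L1D read
miss, which armv6_perf_cache_map resolves to ARMV6_PERFCTR_DCACHE_MISS (0xB):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* PERF_COUNT_HW_CACHE_L1D = 0, OP_READ = 0, RESULT_MISS = 1 */
	uint64_t config = 0 | (0 << 8) | (1 << 16);
	unsigned type   = (unsigned)(config & 0xff);
	unsigned op     = (unsigned)((config >> 8) & 0xff);
	unsigned result = (unsigned)((config >> 16) & 0xff);

	/* armv6_perf_cache_map[type][op][result] would yield 0xB here. */
	printf("type=%u op=%u result=%u -> event 0xB\n", type, op, result);
	return 0;
}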
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event_v7.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event_v7.c
new file mode 100644
index 0000000..7deaa7f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event_v7.c
@@ -0,0 +1,1332 @@
+/*
+ * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
+ *
+ * ARMv7 support: Jean Pihet <jpihet@mvista.com>
+ * 2010 (c) MontaVista Software, LLC.
+ *
+ * Copied from ARMv6 code, with the low level code inspired
+ *  by the ARMv7 Oprofile code.
+ *
+ * Cortex-A8 has up to 4 configurable performance counters and
+ *  a single cycle counter.
+ * Cortex-A9 has up to 31 configurable performance counters and
+ *  a single cycle counter.
+ *
+ * All counters can be enabled/disabled and IRQ masked separately. The cycle
+ *  counter and the event counters can each be reset separately.
+ */
+
+#ifdef CONFIG_CPU_V7
+
+static struct arm_pmu armv7pmu;
+
+/*
+ * Common ARMv7 event types
+ *
+ * Note: An implementation may not be able to count all of these events
+ * but the encodings are considered to be `reserved' in the case that
+ * they are not available.
+ */
+enum armv7_perf_types {
+	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
+	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
+	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
+	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
+	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
+	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
+	ARMV7_PERFCTR_MEM_READ				= 0x06,
+	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
+	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
+	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
+	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
+	ARMV7_PERFCTR_CID_WRITE				= 0x0B,
+
+	/*
+	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
+	 * It counts:
+	 *  - all (taken) branch instructions,
+	 *  - instructions that explicitly write the PC,
+	 *  - exception generating instructions.
+	 */
+	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
+	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
+	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
+	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
+	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
+	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
+	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,
+
+	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
+	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
+	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
+	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
+	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
+	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
+	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
+	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
+	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
+	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
+	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
+	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,
+
+	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
+};
+
+/* ARMv7 Cortex-A8 specific event types */
+enum armv7_a8_perf_types {
+	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
+	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
+	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
+	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
+};
+
+/* ARMv7 Cortex-A9 specific event types */
+enum armv7_a9_perf_types {
+	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
+	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
+	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
+};
+
+/* ARMv7 Cortex-A5 specific event types */
+enum armv7_a5_perf_types {
+	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
+	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
+};
+
+/* ARMv7 Cortex-A15 specific event types */
+enum armv7_a15_perf_types {
+	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
+	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
+	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
+	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,
+
+	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
+	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,
+
+	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
+	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
+	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
+	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,
+
+	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
+};
+
+/*
+ * Cortex-A8 HW events mapping
+ *
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					  [PERF_COUNT_HW_CACHE_OP_MAX]
+					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+/*
+ * Cortex-A9 HW events mapping
+ */
+static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
+};
+
+static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					  [PERF_COUNT_HW_CACHE_OP_MAX]
+					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+/*
+ * Cortex-A5 HW events mapping
+ */
+static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		/*
+		 * The prefetch counters don't differentiate between the I
+		 * side and the D side.
+		 */
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+/*
+ * Cortex-A15 HW events mapping
+ */
+static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		/*
+		 * Not all performance counters differentiate between read
+		 * and write accesses/misses so we're not always strictly
+		 * correct, but it's the best we can do. Writes and reads get
+		 * combined in these cases.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+/*
+ * Cortex-A7 HW events mapping
+ */
+static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+/*
+ * Perf Events' indices
+ */
+#define	ARMV7_IDX_CYCLE_COUNTER	0
+#define	ARMV7_IDX_COUNTER0	1
+#define	ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+#define	ARMV7_MAX_COUNTERS	32
+#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
+
+/*
+ * ARMv7 low level PMNC access
+ */
+
+/*
+ * Perf Event to low level counters mapping
+ */
+#define	ARMV7_IDX_TO_COUNTER(x)	\
+	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
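+
+/*
+ * Worked example (illustrative): with ARMV7_IDX_COUNTER0 == 1,
+ *
+ *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0)     == 0
+ *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0 + 1) == 1
+ *
+ * i.e. perf index N maps to hardware event counter N - 1. The cycle
+ * counter (index 0) never goes through this macro; the callers below
+ * special-case it.
+ */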
+
+/*
+ * Per-CPU PMNC: config reg
+ */
+#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
+#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
+#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
+#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
+#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
+#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
+#define	ARMV7_PMNC_N_MASK	0x1f
+#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
+
+/*
+ * FLAG: counters overflow flag status reg
+ */
+#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
+#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
+#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv2
+ */
+#define	ARMV7_EXCLUDE_PL1	(1 << 31)
+#define	ARMV7_EXCLUDE_USER	(1 << 30)
+#define	ARMV7_INCLUDE_HYP	(1 << 27)
+
+static inline u32 armv7_pmnc_read(void)
+{
+	u32 val;
+	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
+	return val;
+}
+
+static inline void armv7_pmnc_write(u32 val)
+{
+	val &= ARMV7_PMNC_MASK;
+	isb();
+	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
+}
+
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
+{
+	return pmnc & ARMV7_OVERFLOWED_MASK;
+}
+
+static inline int armv7_pmnc_counter_valid(int idx)
+{
+	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+}
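+
+/*
+ * For example (illustrative), with num_events == 5 (four event
+ * counters plus the cycle counter), ARMV7_IDX_COUNTER_LAST evaluates
+ * to 4 and the valid perf indices run from 0 (the cycle counter)
+ * through 4.
+ */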
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
+{
+	int ret = 0;
+	u32 counter;
+
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u checking wrong counter %d overflow status\n",
+			smp_processor_id(), idx);
+	} else {
+		counter = ARMV7_IDX_TO_COUNTER(idx);
+		ret = pmnc & BIT(counter);
+	}
+
+	return ret;
+}
+
+static inline int armv7_pmnc_select_counter(int idx)
+{
+	u32 counter;
+
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u selecting wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
+	}
+
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
+	isb();
+
+	return idx;
+}
+
+static inline u32 armv7pmu_read_counter(int idx)
+{
+	u32 value = 0;
+
+	if (!armv7_pmnc_counter_valid(idx))
+		pr_err("CPU%u reading wrong counter %d\n",
+			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
+
+	return value;
+}
+
+static inline void armv7pmu_write_counter(int idx, u32 value)
+{
+	if (!armv7_pmnc_counter_valid(idx))
+		pr_err("CPU%u writing wrong counter %d\n",
+			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
+}
+
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
+{
+	if (armv7_pmnc_select_counter(idx) == idx) {
+		val &= ARMV7_EVTYPE_MASK;
+		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+	}
+}
+
+static inline int armv7_pmnc_enable_counter(int idx)
+{
+	u32 counter;
+
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
+	}
+
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
+	return idx;
+}
+
+static inline int armv7_pmnc_disable_counter(int idx)
+{
+	u32 counter;
+
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
+	}
+
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
+	return idx;
+}
+
+static inline int armv7_pmnc_enable_intens(int idx)
+{
+	u32 counter;
+
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
+	}
+
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
+	return idx;
+}
+
+static inline int armv7_pmnc_disable_intens(int idx)
+{
+	u32 counter;
+
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
+	}
+
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+	isb();
+	/* Clear the overflow flag in case an interrupt is pending. */
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+	isb();
+
+	return idx;
+}
+
+static inline u32 armv7_pmnc_getreset_flags(void)
+{
+	u32 val;
+
+	/* Read */
+	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+
+	/* Write to clear flags */
+	val &= ARMV7_FLAG_MASK;
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
+
+	return val;
+}
+
+#ifdef DEBUG
+static void armv7_pmnc_dump_regs(void)
+{
+	u32 val;
+	unsigned int cnt;
+
+	printk(KERN_INFO "PMNC registers dump:\n");
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+	printk(KERN_INFO "PMNC  =0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
+	printk(KERN_INFO "CNTENS=0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
+	printk(KERN_INFO "INTENS=0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+	printk(KERN_INFO "FLAGS =0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
+	printk(KERN_INFO "SELECT=0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+	printk(KERN_INFO "CCNT  =0x%08x\n", val);
+
+	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
+		armv7_pmnc_select_counter(cnt);
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
+			ARMV7_IDX_TO_COUNTER(cnt), val);
+		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
+		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
+			ARMV7_IDX_TO_COUNTER(cnt), val);
+	}
+}
+#endif
+
+static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	/*
+	 * Enable counter and interrupt, and set the counter to count
+	 * the event that we're interested in.
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/*
+	 * Disable counter
+	 */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Set event (if destined for PMNx counters)
+	 * We only need to set the event for the cycle counter if we
+	 * have the ability to perform event filtering.
+	 */
+	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
+		armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+	/*
+	 * Enable interrupt for this counter
+	 */
+	armv7_pmnc_enable_intens(idx);
+
+	/*
+	 * Enable counter
+	 */
+	armv7_pmnc_enable_counter(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	/*
+	 * Disable counter and interrupt
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/*
+	 * Disable counter
+	 */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Disable interrupt for this counter
+	 */
+	armv7_pmnc_disable_intens(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+{
+	u32 pmnc;
+	struct perf_sample_data data;
+	struct pmu_hw_events *cpuc;
+	struct pt_regs *regs;
+	int idx;
+
+	/*
+	 * Get and reset the IRQ flags
+	 */
+	pmnc = armv7_pmnc_getreset_flags();
+
+	/*
+	 * Did an overflow occur?
+	 */
+	if (!armv7_pmnc_has_overflowed(pmnc))
+		return IRQ_NONE;
+
+	/*
+	 * Handle the counter(s) overflow(s)
+	 */
+	regs = get_irq_regs();
+
+	perf_sample_data_init(&data, 0);
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
+		/*
+		 * We have a single interrupt for all counters. Check that
+		 * each counter has overflowed before we process it.
+		 */
+		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			cpu_pmu->disable(hwc, idx);
+	}
+
+	/*
+	 * Handle the pending perf events.
+	 *
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
+	 * will not work.
+	 */
+	irq_work_run();
+
+	return IRQ_HANDLED;
+}
+
+static void armv7pmu_start(void)
+{
+	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	/* Enable all counters */
+	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv7pmu_stop(void)
+{
+	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	/* Disable all counters */
+	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
+				  struct hw_perf_event *event)
+{
+	int idx;
+	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
+
+	/* Always place a cycle-count event on the cycle counter. */
+	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
+		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
+			return -EAGAIN;
+
+		return ARMV7_IDX_CYCLE_COUNTER;
+	}
+
+	/*
+	 * For anything other than a cycle-count event, try to use
+	 * one of the event counters.
+	 */
+	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
+	}
+
+	/* The counters are all in use. */
+	return -EAGAIN;
+}
+
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv7pmu_set_event_filter(struct hw_perf_event *event,
+				     struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+
+	if (attr->exclude_idle)
+		return -EPERM;
+	if (attr->exclude_user)
+		config_base |= ARMV7_EXCLUDE_USER;
+	if (attr->exclude_kernel)
+		config_base |= ARMV7_EXCLUDE_PL1;
+	if (!attr->exclude_hv)
+		config_base |= ARMV7_INCLUDE_HYP;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base = config_base;
+
+	return 0;
+}
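+
+/*
+ * Illustrative example: a perf_event_attr with only exclude_kernel set
+ * yields config_base = ARMV7_EXCLUDE_PL1 | ARMV7_INCLUDE_HYP, so the
+ * counter increments at PL0 (and in Hyp mode where implemented) once
+ * the filter bits reach PMXEVTYPER via armv7_pmnc_write_evtsel().
+ */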
+
+static void armv7pmu_reset(void *info)
+{
+	u32 idx, nb_cnt = cpu_pmu->num_events;
+
+	/* The counter and interrupt enable registers are unknown at reset. */
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
+		armv7pmu_disable_event(NULL, idx);
+
+	/* Initialize & Reset PMNC: C and P bits */
+	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
+static int armv7_a8_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a8_perf_map,
+				&armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a9_perf_map,
+				&armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a5_perf_map,
+				&armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a15_perf_map,
+				&armv7_a15_perf_cache_map, 0xFF);
+}
+
+static int armv7_a7_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a7_perf_map,
+				&armv7_a7_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv7pmu = {
+	.handle_irq		= armv7pmu_handle_irq,
+	.enable			= armv7pmu_enable_event,
+	.disable		= armv7pmu_disable_event,
+	.read_counter		= armv7pmu_read_counter,
+	.write_counter		= armv7pmu_write_counter,
+	.get_event_idx		= armv7pmu_get_event_idx,
+	.start			= armv7pmu_start,
+	.stop			= armv7pmu_stop,
+	.reset			= armv7pmu_reset,
+	.max_period		= (1LLU << 32) - 1,
+};
+
+static u32 __init armv7_read_num_pmnc_events(void)
+{
+	u32 nb_cnt;
+
+	/* Read the number of CNTx counters supported from the PMNC */
+	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
+
+	/* Add the CPU cycles counter and return */
+	return nb_cnt + 1;
+}
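+
+/*
+ * Worked example (illustrative): a PMNC whose N field (bits [15:11])
+ * reads 4 describes four event counters, so this returns 5 once the
+ * always-present cycle counter is added.
+ */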
+
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
+{
+	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
+	armv7pmu.name		= "ARMv7 Cortex-A8";
+	armv7pmu.map_event	= armv7_a8_map_event;
+	armv7pmu.num_events	= armv7_read_num_pmnc_events();
+	return &armv7pmu;
+}
+
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
+{
+	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
+	armv7pmu.name		= "ARMv7 Cortex-A9";
+	armv7pmu.map_event	= armv7_a9_map_event;
+	armv7pmu.num_events	= armv7_read_num_pmnc_events();
+	return &armv7pmu;
+}
+
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
+{
+	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
+	armv7pmu.name		= "ARMv7 Cortex-A5";
+	armv7pmu.map_event	= armv7_a5_map_event;
+	armv7pmu.num_events	= armv7_read_num_pmnc_events();
+	return &armv7pmu;
+}
+
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
+{
+	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
+	armv7pmu.name		= "ARMv7 Cortex-A15";
+	armv7pmu.map_event	= armv7_a15_map_event;
+	armv7pmu.num_events	= armv7_read_num_pmnc_events();
+	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
+	return &armv7pmu;
+}
+
+static struct arm_pmu *__init armv7_a7_pmu_init(void)
+{
+	armv7pmu.id		= ARM_PERF_PMU_ID_CA7;
+	armv7pmu.name		= "ARMv7 Cortex-A7";
+	armv7pmu.map_event	= armv7_a7_map_event;
+	armv7pmu.num_events	= armv7_read_num_pmnc_events();
+	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
+	return &armv7pmu;
+}
+#else
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
+{
+	return NULL;
+}
+
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
+{
+	return NULL;
+}
+
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
+{
+	return NULL;
+}
+
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
+{
+	return NULL;
+}
+
+static struct arm_pmu *__init armv7_a7_pmu_init(void)
+{
+	return NULL;
+}
+#endif	/* CONFIG_CPU_V7 */
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event_xscale.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event_xscale.c
new file mode 100644
index 0000000..71a21e6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/perf_event_xscale.c
@@ -0,0 +1,839 @@
+/*
+ * ARMv5 [xscale] Performance counter handling code.
+ *
+ * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * Based on the previous xscale OProfile code.
+ *
+ * There are two variants of the xscale PMU that we support:
+ * 	- xscale1pmu: 2 event counters and a cycle counter
+ * 	- xscale2pmu: 4 event counters and a cycle counter
+ * The two variants share event definitions, but have different
+ * PMU structures.
+ */
+
+#ifdef CONFIG_CPU_XSCALE
+enum xscale_perf_types {
+	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
+	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
+	XSCALE_PERFCTR_DATA_STALL		= 0x02,
+	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
+	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
+	XSCALE_PERFCTR_BRANCH			= 0x05,
+	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
+	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
+	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
+	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
+	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
+	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
+	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
+	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
+	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
+	XSCALE_PERFCTR_BCU_FULL			= 0x11,
+	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
+	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
+	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
+	XSCALE_PERFCTR_RMW			= 0x16,
+	/* XSCALE_PERFCTR_CCNT is not hardware defined */
+	XSCALE_PERFCTR_CCNT			= 0xFE,
+	XSCALE_PERFCTR_UNUSED			= 0xFF,
+};
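+
+/*
+ * Note: XSCALE_PERFCTR_CCNT (0xFE) is a software sentinel rather than
+ * a real hardware event number; xscale1pmu_get_event_idx() matches it
+ * to steer cycle-count events onto the dedicated cycle counter.
+ */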
+
+enum xscale_counters {
+	XSCALE_CYCLE_COUNTER	= 0,
+	XSCALE_COUNTER0,
+	XSCALE_COUNTER1,
+	XSCALE_COUNTER2,
+	XSCALE_COUNTER3,
+};
+
+static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
+	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
+};
+
+static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					   [PERF_COUNT_HW_CACHE_OP_MAX]
+					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+#define	XSCALE_PMU_ENABLE	0x001
+#define XSCALE_PMN_RESET	0x002
+#define	XSCALE_CCNT_RESET	0x004
+#define	XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
+#define XSCALE_PMU_CNT64	0x008
+
+#define XSCALE1_OVERFLOWED_MASK	0x700
+#define XSCALE1_CCOUNT_OVERFLOW	0x400
+#define XSCALE1_COUNT0_OVERFLOW	0x100
+#define XSCALE1_COUNT1_OVERFLOW	0x200
+#define XSCALE1_CCOUNT_INT_EN	0x040
+#define XSCALE1_COUNT0_INT_EN	0x010
+#define XSCALE1_COUNT1_INT_EN	0x020
+#define XSCALE1_COUNT0_EVT_SHFT	12
+#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
+#define XSCALE1_COUNT1_EVT_SHFT	20
+#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
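+
+/*
+ * Illustrative layout note: on xscale1 the per-counter event numbers
+ * live inside the PMNC itself, so programming, say,
+ * XSCALE_PERFCTR_INSTRUCTION (0x07) on counter 0 ORs (0x07 << 12)
+ * into the register, as xscale1pmu_enable_event() does below.
+ */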
+
+static inline u32
+xscale1pmu_read_pmnc(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
+	return val;
+}
+
+static inline void
+xscale1pmu_write_pmnc(u32 val)
+{
+	/* the upper 4 bits and bits 7 and 11 are write-as-0 */
+	val &= 0xffff77f;
+	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
+}
+
+static inline int
+xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
+					enum xscale_counters counter)
+{
+	int ret = 0;
+
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
+		break;
+	case XSCALE_COUNTER0:
+		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
+		break;
+	case XSCALE_COUNTER1:
+		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+	}
+
+	return ret;
+}
+
+static irqreturn_t
+xscale1pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long pmnc;
+	struct perf_sample_data data;
+	struct pmu_hw_events *cpuc;
+	struct pt_regs *regs;
+	int idx;
+
+	/*
+	 * NOTE: there's an A stepping erratum whereby, if an overflow
+	 *       bit is already set when another overflow occurs, the
+	 *       previous overflow bit gets cleared. There's no
+	 *       workaround; this is fixed in the B stepping and later.
+	 */
+	pmnc = xscale1pmu_read_pmnc();
+
+	/*
+	 * Write the value back to clear the overflow flags. Overflow
+	 * flags remain in pmnc for use below. We also disable the PMU
+	 * while we process the interrupt.
+	 */
+	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
+		return IRQ_NONE;
+
+	regs = get_irq_regs();
+
+	perf_sample_data_init(&data, 0);
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (!event)
+			continue;
+
+		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			cpu_pmu->disable(hwc, idx);
+	}
+
+	irq_work_run();
+
+	/*
+	 * Re-enable the PMU.
+	 */
+	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+	xscale1pmu_write_pmnc(pmnc);
+
+	return IRQ_HANDLED;
+}
+
+static void
+xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		mask = 0;
+		evt = XSCALE1_CCOUNT_INT_EN;
+		break;
+	case XSCALE_COUNTER0:
+		mask = XSCALE1_COUNT0_EVT_MASK;
+		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
+			XSCALE1_COUNT0_INT_EN;
+		break;
+	case XSCALE_COUNTER1:
+		mask = XSCALE1_COUNT1_EVT_MASK;
+		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
+			XSCALE1_COUNT1_INT_EN;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = xscale1pmu_read_pmnc();
+	val &= ~mask;
+	val |= evt;
+	xscale1pmu_write_pmnc(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void
+xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long val, mask, evt, flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		mask = XSCALE1_CCOUNT_INT_EN;
+		evt = 0;
+		break;
+	case XSCALE_COUNTER0:
+		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
+		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER1:
+		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
+		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = xscale1pmu_read_pmnc();
+	val &= ~mask;
+	val |= evt;
+	xscale1pmu_write_pmnc(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
+			struct hw_perf_event *event)
+{
+	if (XSCALE_PERFCTR_CCNT == event->config_base) {
+		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
+			return -EAGAIN;
+
+		return XSCALE_CYCLE_COUNTER;
+	} else {
+		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
+			return XSCALE_COUNTER1;
+
+		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
+			return XSCALE_COUNTER0;
+
+		return -EAGAIN;
+	}
+}
+
+static void
+xscale1pmu_start(void)
+{
+	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = xscale1pmu_read_pmnc();
+	val |= XSCALE_PMU_ENABLE;
+	xscale1pmu_write_pmnc(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void
+xscale1pmu_stop(void)
+{
+	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = xscale1pmu_read_pmnc();
+	val &= ~XSCALE_PMU_ENABLE;
+	xscale1pmu_write_pmnc(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static inline u32
+xscale1pmu_read_counter(int counter)
+{
+	u32 val = 0;
+
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER0:
+		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER1:
+		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
+		break;
+	}
+
+	return val;
+}
+
+static inline void
+xscale1pmu_write_counter(int counter, u32 val)
+{
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER0:
+		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER1:
+		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
+		break;
+	}
+}
+
+static int xscale_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &xscale_perf_map,
+				&xscale_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu xscale1pmu = {
+	.id		= ARM_PERF_PMU_ID_XSCALE1,
+	.name		= "xscale1",
+	.handle_irq	= xscale1pmu_handle_irq,
+	.enable		= xscale1pmu_enable_event,
+	.disable	= xscale1pmu_disable_event,
+	.read_counter	= xscale1pmu_read_counter,
+	.write_counter	= xscale1pmu_write_counter,
+	.get_event_idx	= xscale1pmu_get_event_idx,
+	.start		= xscale1pmu_start,
+	.stop		= xscale1pmu_stop,
+	.map_event	= xscale_map_event,
+	.num_events	= 3,
+	.max_period	= (1LLU << 32) - 1,
+};
+
+static struct arm_pmu *__init xscale1pmu_init(void)
+{
+	return &xscale1pmu;
+}
+
+#define XSCALE2_OVERFLOWED_MASK	0x01f
+#define XSCALE2_CCOUNT_OVERFLOW	0x001
+#define XSCALE2_COUNT0_OVERFLOW	0x002
+#define XSCALE2_COUNT1_OVERFLOW	0x004
+#define XSCALE2_COUNT2_OVERFLOW	0x008
+#define XSCALE2_COUNT3_OVERFLOW	0x010
+#define XSCALE2_CCOUNT_INT_EN	0x001
+#define XSCALE2_COUNT0_INT_EN	0x002
+#define XSCALE2_COUNT1_INT_EN	0x004
+#define XSCALE2_COUNT2_INT_EN	0x008
+#define XSCALE2_COUNT3_INT_EN	0x010
+#define XSCALE2_COUNT0_EVT_SHFT	0
+#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
+#define XSCALE2_COUNT1_EVT_SHFT	8
+#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
+#define XSCALE2_COUNT2_EVT_SHFT	16
+#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
+#define XSCALE2_COUNT3_EVT_SHFT	24
+#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
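+
+/*
+ * Unlike xscale1, where event selection is packed into the PMNC, the
+ * xscale2 PMU splits its state across separate registers: an event
+ * select register with one 8-bit field per counter, plus dedicated
+ * interrupt-enable and overflow-flag registers (accessors below).
+ */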
+
+static inline u32
+xscale2pmu_read_pmnc(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
+	/* bits 1-2 and 4-23 are read-unpredictable */
+	return val & 0xff000009;
+}
+
+static inline void
+xscale2pmu_write_pmnc(u32 val)
+{
+	/* bits 4-23 are write-as-0, 24-31 are write ignored */
+	val &= 0xf;
+	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_overflow_flags(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
+	return val;
+}
+
+static inline void
+xscale2pmu_write_overflow_flags(u32 val)
+{
+	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_event_select(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
+	return val;
+}
+
+static inline void
+xscale2pmu_write_event_select(u32 val)
+{
+	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
+}
+
+static inline u32
+xscale2pmu_read_int_enable(void)
+{
+	u32 val;
+	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
+	return val;
+}
+
+static void
+xscale2pmu_write_int_enable(u32 val)
+{
+	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
+}
+
+static inline int
+xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
+					enum xscale_counters counter)
+{
+	int ret = 0;
+
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
+		break;
+	case XSCALE_COUNTER0:
+		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
+		break;
+	case XSCALE_COUNTER1:
+		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
+		break;
+	case XSCALE_COUNTER2:
+		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
+		break;
+	case XSCALE_COUNTER3:
+		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+	}
+
+	return ret;
+}
+
+static irqreturn_t
+xscale2pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long pmnc, of_flags;
+	struct perf_sample_data data;
+	struct pmu_hw_events *cpuc;
+	struct pt_regs *regs;
+	int idx;
+
+	/* Disable the PMU. */
+	pmnc = xscale2pmu_read_pmnc();
+	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+	/* Check the overflow flag register. */
+	of_flags = xscale2pmu_read_overflow_flags();
+	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
+		return IRQ_NONE;
+
+	/* Clear the overflow bits. */
+	xscale2pmu_write_overflow_flags(of_flags);
+
+	regs = get_irq_regs();
+
+	perf_sample_data_init(&data, 0);
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (!event)
+			continue;
+
+		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			cpu_pmu->disable(hwc, idx);
+	}
+
+	irq_work_run();
+
+	/*
+	 * Re-enable the PMU.
+	 */
+	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+	xscale2pmu_write_pmnc(pmnc);
+
+	return IRQ_HANDLED;
+}
+
+static void
+xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	ien = xscale2pmu_read_int_enable();
+	evtsel = xscale2pmu_read_event_select();
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		ien |= XSCALE2_CCOUNT_INT_EN;
+		break;
+	case XSCALE_COUNTER0:
+		ien |= XSCALE2_COUNT0_INT_EN;
+		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER1:
+		ien |= XSCALE2_COUNT1_INT_EN;
+		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER2:
+		ien |= XSCALE2_COUNT2_INT_EN;
+		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
+		break;
+	case XSCALE_COUNTER3:
+		ien |= XSCALE2_COUNT3_INT_EN;
+		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	xscale2pmu_write_event_select(evtsel);
+	xscale2pmu_write_int_enable(ien);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void
+xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags, ien, evtsel, of_flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	ien = xscale2pmu_read_int_enable();
+	evtsel = xscale2pmu_read_event_select();
+
+	switch (idx) {
+	case XSCALE_CYCLE_COUNTER:
+		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		of_flags = XSCALE2_CCOUNT_OVERFLOW;
+		break;
+	case XSCALE_COUNTER0:
+		ien &= ~XSCALE2_COUNT0_INT_EN;
+		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		of_flags = XSCALE2_COUNT0_OVERFLOW;
+		break;
+	case XSCALE_COUNTER1:
+		ien &= ~XSCALE2_COUNT1_INT_EN;
+		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		of_flags = XSCALE2_COUNT1_OVERFLOW;
+		break;
+	case XSCALE_COUNTER2:
+		ien &= ~XSCALE2_COUNT2_INT_EN;
+		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		of_flags = XSCALE2_COUNT2_OVERFLOW;
+		break;
+	case XSCALE_COUNTER3:
+		ien &= ~XSCALE2_COUNT3_INT_EN;
+		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		of_flags = XSCALE2_COUNT3_OVERFLOW;
+		break;
+	default:
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	xscale2pmu_write_event_select(evtsel);
+	xscale2pmu_write_int_enable(ien);
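+	/*
+	 * The overflow flag register is write-to-clear (as in the IRQ
+	 * handler above), so this discards any overflow still pending
+	 * for the counter being released.
+	 */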
+	xscale2pmu_write_overflow_flags(of_flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
+			struct hw_perf_event *event)
+{
+	int idx = xscale1pmu_get_event_idx(cpuc, event);
+	if (idx >= 0)
+		goto out;
+
+	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
+		idx = XSCALE_COUNTER3;
+	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
+		idx = XSCALE_COUNTER2;
+out:
+	return idx;
+}
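+
+/*
+ * Note: the XScale1 allocator tried above hands out the cycle counter
+ * and counters 0-1; only when those are taken do we use the two extra
+ * counters (3, then 2) that the second-generation PMU adds.
+ */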
+
+static void
+xscale2pmu_start(void)
+{
+	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
+	val |= XSCALE_PMU_ENABLE;
+	xscale2pmu_write_pmnc(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void
+xscale2pmu_stop(void)
+{
+	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	val = xscale2pmu_read_pmnc();
+	val &= ~XSCALE_PMU_ENABLE;
+	xscale2pmu_write_pmnc(val);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static inline u32
+xscale2pmu_read_counter(int counter)
+{
+	u32 val = 0;
+
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER0:
+		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER1:
+		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER2:
+		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
+		break;
+	case XSCALE_COUNTER3:
+		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
+		break;
+	}
+
+	return val;
+}
+
+static inline void
+xscale2pmu_write_counter(int counter, u32 val)
+{
+	switch (counter) {
+	case XSCALE_CYCLE_COUNTER:
+		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER0:
+		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER1:
+		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER2:
+		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
+		break;
+	case XSCALE_COUNTER3:
+		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
+		break;
+	}
+}
+
+static struct arm_pmu xscale2pmu = {
+	.id		= ARM_PERF_PMU_ID_XSCALE2,
+	.name		= "xscale2",
+	.handle_irq	= xscale2pmu_handle_irq,
+	.enable		= xscale2pmu_enable_event,
+	.disable	= xscale2pmu_disable_event,
+	.read_counter	= xscale2pmu_read_counter,
+	.write_counter	= xscale2pmu_write_counter,
+	.get_event_idx	= xscale2pmu_get_event_idx,
+	.start		= xscale2pmu_start,
+	.stop		= xscale2pmu_stop,
+	.map_event	= xscale_map_event,
+	.num_events	= 5,
+	.max_period	= (1LLU << 32) - 1,
+};
+
+static struct arm_pmu *__init xscale2pmu_init(void)
+{
+	return &xscale2pmu;
+}
+#else
+static struct arm_pmu *__init xscale1pmu_init(void)
+{
+	return NULL;
+}
+
+static struct arm_pmu *__init xscale2pmu_init(void)
+{
+	return NULL;
+}
+#endif	/* CONFIG_CPU_XSCALE */
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/pj4-cp0.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/pj4-cp0.c
new file mode 100644
index 0000000..679cf4d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/pj4-cp0.c
@@ -0,0 +1,93 @@
+/*
+ * linux/arch/arm/kernel/pj4-cp0.c
+ *
+ * PJ4 iWMMXt coprocessor context switching and handling
+ *
+ * Copyright (c) 2010 Marvell International Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/thread_notify.h>
+
+static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		/*
+		 * flush_thread() zeroes thread->fpstate, so no need
+		 * to do anything here.
+		 *
+		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
+		 * initialised state information on the first fault.
+		 */
+
+	case THREAD_NOTIFY_EXIT:
+		iwmmxt_task_release(thread);
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		iwmmxt_task_switch(thread);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iwmmxt_notifier_block = {
+	.notifier_call	= iwmmxt_do,
+};
+
+
+static u32 __init pj4_cp_access_read(void)
+{
+	u32 value;
+
+	__asm__ __volatile__ (
+		"mrc	p15, 0, %0, c1, c0, 2\n\t"
+		: "=r" (value));
+	return value;
+}
+
+static void __init pj4_cp_access_write(u32 value)
+{
+	u32 temp;
+
+	__asm__ __volatile__ (
+		"mcr	p15, 0, %1, c1, c0, 2\n\t"
+		"mrc	p15, 0, %0, c1, c0, 2\n\t"
+		"mov	%0, %0\n\t"
+		"sub	pc, pc, #4\n\t"
+		: "=r" (temp) : "r" (value));
+}
+
+
+/*
+ * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
+ * switch code handle iWMMXt context switching.
+ */
+static int __init pj4_cp0_init(void)
+{
+	u32 cp_access;
+
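+	/*
+	 * The low four bits are the two-bit access-control fields for
+	 * CP0 and CP1 (the iWMMXt coprocessors); clearing them makes the
+	 * first iWMMXt instruction trap, so call_fpe() can set up the
+	 * context lazily as described above.
+	 */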
+	cp_access = pj4_cp_access_read() & ~0xf;
+	pj4_cp_access_write(cp_access);
+
+	printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n");
+	elf_hwcap |= HWCAP_IWMMXT;
+	thread_register_notifier(&iwmmxt_notifier_block);
+
+	return 0;
+}
+
+late_initcall(pj4_cp0_init);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/pmu.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/pmu.c
new file mode 100644
index 0000000..2334bf8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/pmu.c
@@ -0,0 +1,36 @@
+/*
+ *  linux/arch/arm/kernel/pmu.c
+ *
+ *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *  Copyright (C) 2010 ARM Ltd, Will Deacon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/pmu.h>
+
+/*
+ * PMU locking to ensure mutual exclusion between different subsystems.
+ */
+static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];
+
+int
+reserve_pmu(enum arm_pmu_type type)
+{
+	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
+}
+EXPORT_SYMBOL_GPL(reserve_pmu);
+
+void
+release_pmu(enum arm_pmu_type type)
+{
+	clear_bit_unlock(type, pmu_lock);
+}
+EXPORT_SYMBOL_GPL(release_pmu);
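+
+/*
+ * Usage sketch (illustrative only; ARM_PMU_DEVICE_CPU is the CPU
+ * counter slot from <asm/pmu.h>): a claimant such as the perf backend
+ * brackets its use of the counters with these calls:
+ *
+ *	if (reserve_pmu(ARM_PMU_DEVICE_CPU))
+ *		return -EBUSY;
+ *	...program and use the counters...
+ *	release_pmu(ARM_PMU_DEVICE_CPU);
+ */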
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/process.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/process.c
new file mode 100755
index 0000000..e0f534a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/process.c
@@ -0,0 +1,645 @@
+/*
+ *  linux/arch/arm/kernel/process.c
+ *
+ *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
+ *  Original Copyright (C) 1995  Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <stdarg.h>
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/user.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/elfcore.h>
+#include <linux/pm.h>
+#include <linux/tick.h>
+#include <linux/utsname.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/cpuidle.h>
+#include <linux/console.h>
+
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/thread_notify.h>
+#include <asm/stacktrace.h>
+#include <asm/mach/time.h>
+
+/*add for HUB: CVE-2014-9870*/
+#include <asm/tls.h>
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+s64 pm_enter_time = 0;
+static const char *processor_modes[] = {
+  "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
+  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
+  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
+  "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
+};
+
+static const char *isa_modes[] = {
+  "ARM" , "Thumb" , "Jazelle", "ThumbEE"
+};
+
+extern void setup_mm_for_reboot(void);
+
+static volatile int hlt_counter;
+
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+	smp_send_all_cpu_backtrace();
+}
+#endif
+
+void disable_hlt(void)
+{
+	hlt_counter++;
+}
+
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+	hlt_counter--;
+}
+
+EXPORT_SYMBOL(enable_hlt);
+
+static int __init nohlt_setup(char *__unused)
+{
+	hlt_counter = 1;
+	return 1;
+}
+
+static int __init hlt_setup(char *__unused)
+{
+	hlt_counter = 0;
+	return 1;
+}
+
+__setup("nohlt", nohlt_setup);
+__setup("hlt", hlt_setup);
+
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+typedef void (*phys_reset_t)(unsigned long);
+
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+	printk("\n");
+	pr_emerg("Restarting %s\n", linux_banner);
+	if (console_trylock()) {
+		console_unlock();
+		return;
+	}
+
+	mdelay(50);
+
+	local_irq_disable();
+	if (!console_trylock())
+		pr_emerg("arm_restart: Console was locked! Busting\n");
+	else
+		pr_emerg("arm_restart: Console was locked!\n");
+	console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
+/*
+ * A temporary stack to use for CPU reset. This is static so that we
+ * don't clobber it with the identity mapping. When running with this
+ * stack, any references to the current task *will not work* so you
+ * should really do as little as possible before jumping to your reset
+ * code.
+ */
+static u64 soft_restart_stack[16];
+
+static void __soft_restart(void *addr)
+{
+	phys_reset_t phys_reset;
+
+	/* Take out a flat memory mapping. */
+	setup_mm_for_reboot();
+
+	/* Clean and invalidate caches */
+	flush_cache_all();
+
+	/* Turn off caching */
+	cpu_proc_fin();
+
+	/* Push out any further dirty data, and ensure cache is empty */
+	flush_cache_all();
+
+	/* Switch to the identity mapping. */
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset((unsigned long)addr);
+
+	/* Should never get here. */
+	BUG();
+}
+
+void soft_restart(unsigned long addr)
+{
+	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
+	/* Disable interrupts first */
+	local_irq_disable();
+	local_fiq_disable();
+
+	/* Disable the L2 if we're the last man standing. */
+	if (num_online_cpus() == 1)
+		outer_disable();
+
+	/* Change to the new stack and continue with the reset. */
+	call_with_stack(__soft_restart, (void *)addr, (void *)stack);
+
+	/* Should never get here. */
+	BUG();
+}
+
+static void null_restart(char mode, const char *cmd)
+{
+}
+
+/*
+ * Function pointers to optional machine specific functions
+ */
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
+EXPORT_SYMBOL_GPL(arm_pm_restart);
+
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - used to ensure that all CPUs discard the old value of
+ * pm_idle and pick up the new one. Required when changing the pm_idle
+ * handler on SMP systems.
+ *
+ * The caller must have changed pm_idle to the new value before the call.
+ * The old pm_idle value will not be used by any CPU after this function
+ * returns.
+ */
+void cpu_idle_wait(void)
+{
+	smp_mb();
+	/* kick all the CPUs so that they exit out of pm_idle */
+	smp_call_function(do_nothing, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+/*
+ * This is our default idle handler.
+ */
+extern void pm_idle_sram_print(void);
+
+void (*arm_pm_idle)(void);
+
+static void default_idle(void)
+{
+	if (arm_pm_idle)
+		arm_pm_idle();
+	else
+		cpu_do_idle();
+	local_irq_enable();
+#ifdef  CONFIG_PM
+	pm_idle_sram_print();
+#endif
+}
+
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
+/*
+ * The idle thread has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way.  The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
+ */
+void cpu_idle(void)
+{
+	local_fiq_enable();
+
+	/* endless idle loop with no priority at all */
+	while (1) {
+		idle_notifier_call_chain(IDLE_START);
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
+		while (!need_resched()) {
+#ifdef CONFIG_HOTPLUG_CPU
+			if (cpu_is_offline(smp_processor_id()))
+				cpu_die();
+#endif
+
+			/*
+			 * We need to disable interrupts here
+			 * to ensure we don't miss a wakeup call.
+			 */
+			local_irq_disable();
+#ifdef CONFIG_PL310_ERRATA_769419
+			wmb();
+#endif
+			if (hlt_counter) {
+				local_irq_enable();
+				cpu_relax();
+			} else if (!need_resched()) {
+				stop_critical_timings();
+				pm_enter_time = ktime_to_us(ktime_get());
+				if (cpuidle_idle_call())
+					pm_idle();
+				start_critical_timings();
+				/*
+				 * pm_idle functions must always
+				 * return with IRQs enabled.
+				 */
+				WARN_ON(irqs_disabled());
+			} else
+				local_irq_enable();
+		}
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
+		idle_notifier_call_chain(IDLE_END);
+		schedule_preempt_disabled();
+	}
+}
+
+static char reboot_mode = 'h';
+
+int __init reboot_setup(char *str)
+{
+	reboot_mode = str[0];
+	return 1;
+}
+
+__setup("reboot=", reboot_setup);
+
+void machine_shutdown(void)
+{
+#ifdef CONFIG_SMP
+
+	preempt_disable();
+
+	smp_send_stop();
+#endif
+}
+
+void machine_halt(void)
+{
+	machine_shutdown();
+	local_irq_disable();
+	while (1);
+}
+
+void machine_power_off(void)
+{
+	machine_shutdown();
+	if (pm_power_off)
+		pm_power_off();
+}
+
+void machine_restart(char *cmd)
+{
+	machine_shutdown();
+	arm_machine_flush_console();
+
+	arm_pm_restart(reboot_mode, cmd);
+
+	/* Give the restart a 1s grace period to take effect */
+	mdelay(1000);
+
+	/* Whoops - the platform was unable to reboot. Tell the user! */
+	printk("Reboot failed -- System halted\n");
+	local_irq_disable();
+	while (1);
+}
+
+void __show_regs(struct pt_regs *regs)
+{
+	unsigned long flags;
+	char buf[64];
+
+	printk("CPU: %d    %s  (%s %.*s)\n",
+		raw_smp_processor_id(), print_tainted(),
+		init_utsname()->release,
+		(int)strcspn(init_utsname()->version, " "),
+		init_utsname()->version);
+	print_symbol("PC is at %s\n", instruction_pointer(regs));
+	print_symbol("LR is at %s\n", regs->ARM_lr);
+	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
+	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
+		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
+	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
+		regs->ARM_r10, regs->ARM_r9,
+		regs->ARM_r8);
+	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
+		regs->ARM_r7, regs->ARM_r6,
+		regs->ARM_r5, regs->ARM_r4);
+	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
+		regs->ARM_r3, regs->ARM_r2,
+		regs->ARM_r1, regs->ARM_r0);
+
+	flags = regs->ARM_cpsr;
+	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
+	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
+	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
+	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
+	buf[4] = '\0';
+
+	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+		buf, interrupts_enabled(regs) ? "n" : "ff",
+		fast_interrupts_enabled(regs) ? "n" : "ff",
+		processor_modes[processor_mode(regs)],
+		isa_modes[isa_mode(regs)],
+		get_fs() == get_ds() ? "kernel" : "user");
+#ifdef CONFIG_CPU_CP15
+	{
+		unsigned int ctrl;
+
+		buf[0] = '\0';
+#ifdef CONFIG_CPU_CP15_MMU
+		{
+			unsigned int transbase, dac;
+			asm("mrc p15, 0, %0, c2, c0\n\t"
+			    "mrc p15, 0, %1, c3, c0\n"
+			    : "=r" (transbase), "=r" (dac));
+			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
+			  	transbase, dac);
+		}
+#endif
+		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
+
+		printk("Control: %08x%s\n", ctrl, buf);
+	}
+#endif
+
+	/* show_extra_register_data(regs, 128); */
+}
+
+void show_regs(struct pt_regs * regs)
+{
+	printk("\n");
+	printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
+	__show_regs(regs);
+	dump_stack();
+}
+
+ATOMIC_NOTIFIER_HEAD(thread_notify_head);
+
+EXPORT_SYMBOL_GPL(thread_notify_head);
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+	thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
+}
+
+void flush_thread(void)
+{
+	struct thread_info *thread = current_thread_info();
+	struct task_struct *tsk = current;
+
+	flush_ptrace_hw_breakpoint(tsk);
+
+	memset(thread->used_cp, 0, sizeof(thread->used_cp));
+	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+	memset(&thread->fpstate, 0, sizeof(union fp_state));
+
+	thread_notify(THREAD_NOTIFY_FLUSH, thread);
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+int
+copy_thread(unsigned long clone_flags, unsigned long stack_start,
+	    unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
+{
+	struct thread_info *thread = task_thread_info(p);
+	struct pt_regs *childregs = task_pt_regs(p);
+
+	*childregs = *regs;
+	childregs->ARM_r0 = 0;
+	childregs->ARM_sp = stack_start;
+
+	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+	thread->cpu_context.sp = (unsigned long)childregs;
+	thread->cpu_context.pc = (unsigned long)ret_from_fork;
+
+	clear_ptrace_hw_breakpoint(p);
+
+	/*Fix for HUB: CVE-2014-9870*/
+	if (clone_flags & CLONE_SETTLS)
+		thread->tp_value[0] = childregs->ARM_r3;
+	thread->tp_value[1] = get_tpuser();
+
+	thread_notify(THREAD_NOTIFY_COPY, thread);
+
+	return 0;
+}
+
+/*
+ * Fill in the task's elfregs structure for a core dump.
+ */
+int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
+{
+	elf_core_copy_regs(elfregs, task_pt_regs(t));
+	return 1;
+}
+
+/*
+ * fill in the fpe structure for a core dump...
+ */
+int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
+{
+	struct thread_info *thread = current_thread_info();
+	int used_math = thread->used_cp[1] | thread->used_cp[2];
+
+	if (used_math)
+		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
+
+	return used_math != 0;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+/*
+ * Shuffle the argument into the correct register before calling the
+ * thread function.  r4 is the thread argument, r5 is the pointer to
+ * the thread function, and r6 points to the exit function.
+ */
+extern void kernel_thread_helper(void);
+asm(	".pushsection .text\n"
+"	.align\n"
+"	.type	kernel_thread_helper, #function\n"
+"kernel_thread_helper:\n"
+#ifdef CONFIG_TRACE_IRQFLAGS
+"	bl	trace_hardirqs_on\n"
+#endif
+"	msr	cpsr_c, r7\n"
+"	mov	r0, r4\n"
+"	mov	lr, r6\n"
+"	mov	pc, r5\n"
+"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
+"	.popsection");
+
+#ifdef CONFIG_ARM_UNWIND
+extern void kernel_thread_exit(long code);
+asm(	".pushsection .text\n"
+"	.align\n"
+"	.type	kernel_thread_exit, #function\n"
+"kernel_thread_exit:\n"
+"	.fnstart\n"
+"	.cantunwind\n"
+"	bl	do_exit\n"
+"	nop\n"
+"	.fnend\n"
+"	.size	kernel_thread_exit, . - kernel_thread_exit\n"
+"	.popsection");
+#else
+#define kernel_thread_exit	do_exit
+#endif
+
+/*
+ * Create a kernel thread.
+ */
+pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+	struct pt_regs regs;
+
+	memset(&regs, 0, sizeof(regs));
+
+	regs.ARM_r4 = (unsigned long)arg;
+	regs.ARM_r5 = (unsigned long)fn;
+	regs.ARM_r6 = (unsigned long)kernel_thread_exit;
+	regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
+	regs.ARM_pc = (unsigned long)kernel_thread_helper;
+	regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
+
+	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
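+
+/*
+ * Usage sketch (illustrative): a caller supplies a C function and its
+ * argument, e.g.
+ *
+ *	static int worker(void *arg) { ...; return 0; }
+ *	...
+ *	pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);
+ *
+ * kernel_thread_helper() then moves r4/r5/r6 into place so worker(arg)
+ * runs in SVC mode with its return path pointing at kernel_thread_exit.
+ */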
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	struct stackframe frame;
+	unsigned long stack_page;
+	int count = 0;
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	frame.fp = thread_saved_fp(p);
+	frame.sp = thread_saved_sp(p);
+	frame.lr = 0;			/* recovered from the stack */
+	frame.pc = thread_saved_pc(p);
+	stack_page = (unsigned long)task_stack_page(p);
+	do {
+		if (frame.sp < stack_page ||
+		    frame.sp >= stack_page + THREAD_SIZE ||
+		    unwind_frame(&frame) < 0)
+			return 0;
+		if (!in_sched_functions(frame.pc))
+			return frame.pc;
+	} while (count ++ < 16);
+	return 0;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
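+	/* 0x02000000 is 32MB: randomise brk within a 32MB window above mm->brk */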
+	unsigned long range_end = mm->brk + 0x02000000;
+	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+}
+
+#ifdef CONFIG_MMU
+
+/*
+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock.  If the lock is not
+ * initialized by pgtable_page_ctor() then a coredump of the vector page will
+ * fail.
+ */
+static int __init vectors_user_mapping_init_page(void)
+{
+	struct page *page;
+	unsigned long addr = 0xffff0000;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset_k(addr);
+	pud = pud_offset(pgd, addr);
+	pmd = pmd_offset(pud, addr);
+	page = pmd_page(*(pmd));
+
+	pgtable_page_ctor(page);
+
+	return 0;
+}
+late_initcall(vectors_user_mapping_init_page);
+
+/*
+ * The vectors page is always readable from user space for the
+ * atomic helpers and the signal restart code. Insert it into the
+ * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ */
+static struct vm_area_struct gate_vma;
+
+static int __init gate_vma_init(void)
+{
+	gate_vma.vm_start	= 0xffff0000;
+	gate_vma.vm_end		= 0xffff0000 + PAGE_SIZE;
+	gate_vma.vm_page_prot	= PAGE_READONLY_EXEC;
+	gate_vma.vm_flags	= VM_READ | VM_EXEC |
+				  VM_MAYREAD | VM_MAYEXEC;
+	return 0;
+}
+arch_initcall(gate_vma_init);
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+	return &gate_vma;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
+}
+
+int in_gate_area_no_mm(unsigned long addr)
+{
+	return in_gate_area(NULL, addr);
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	return (vma == &gate_vma) ? "[vectors]" : NULL;
+}
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/ptrace.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/ptrace.c
new file mode 100644
index 0000000..bd80341
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/ptrace.c
@@ -0,0 +1,950 @@
+/*
+ *  linux/arch/arm/kernel/ptrace.c
+ *
+ *  By Ross Biro 1/23/92
+ * edited by Linus Torvalds
+ * ARM modifications Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/elf.h>
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/security.h>
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/regset.h>
+#include <linux/audit.h>
+
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+
+#define REG_PC	15
+#define REG_PSR	16
+/*
+ * This does not yet catch signals sent when the child dies,
+ * in exit.c or in signal.c.
+ */
+
+#if 0
+/*
+ * Breakpoint SWI instruction: SWI &9F0001
+ */
+#define BREAKINST_ARM	0xef9f0001
+#define BREAKINST_THUMB	0xdf00		/* fill this in later */
+#else
+/*
+ * New breakpoints - use an undefined instruction.  The ARM architecture
+ * reference manual guarantees that the following instruction space
+ * will produce an undefined instruction exception on all CPUs:
+ *
+ *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
+ *  Thumb: 1101 1110 xxxx xxxx
+ */
+#define BREAKINST_ARM	0xe7f001f0
+#define BREAKINST_THUMB	0xde01
+#endif
+
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define REG_OFFSET_NAME(r) \
+	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	REG_OFFSET_NAME(r0),
+	REG_OFFSET_NAME(r1),
+	REG_OFFSET_NAME(r2),
+	REG_OFFSET_NAME(r3),
+	REG_OFFSET_NAME(r4),
+	REG_OFFSET_NAME(r5),
+	REG_OFFSET_NAME(r6),
+	REG_OFFSET_NAME(r7),
+	REG_OFFSET_NAME(r8),
+	REG_OFFSET_NAME(r9),
+	REG_OFFSET_NAME(r10),
+	REG_OFFSET_NAME(fp),
+	REG_OFFSET_NAME(ip),
+	REG_OFFSET_NAME(sp),
+	REG_OFFSET_NAME(lr),
+	REG_OFFSET_NAME(pc),
+	REG_OFFSET_NAME(cpsr),
+	REG_OFFSET_NAME(ORIG_r0),
+	REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:	the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL;
+ */
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_offset *roff;
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset:	the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+	const struct pt_regs_offset *roff;
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (roff->offset == offset)
+			return roff->name;
+	return NULL;
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:      pt_regs which contains kernel stack pointer.
+ * @addr:      address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+	return ((addr & ~(THREAD_SIZE - 1))  ==
+		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:	pt_regs which contains kernel stack pointer.
+ * @n:		stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+	addr += n;
+	if (regs_within_kernel_stack(regs, (unsigned long)addr))
+		return *addr;
+	else
+		return 0;
+}
+
+/*
+ * This routine gets a word off the process's privileged stack.
+ * The offset is how far from the base address stored in the THREAD.
+ * It assumes that all the privileged stacks are in our data space.
+ */
+static inline long get_user_reg(struct task_struct *task, int offset)
+{
+	return task_pt_regs(task)->uregs[offset];
+}
+
+/*
+ * This routine puts a word on the process's privileged stack.
+ * The offset is how far from the base address stored in the THREAD.
+ * It assumes that all the privileged stacks are in our data space.
+ */
+static inline int
+put_user_reg(struct task_struct *task, int offset, long data)
+{
+	struct pt_regs newregs, *regs = task_pt_regs(task);
+	int ret = -EINVAL;
+
+	newregs = *regs;
+	newregs.uregs[offset] = data;
+
+	if (valid_user_regs(&newregs)) {
+		regs->uregs[offset] = data;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	/* Nothing to do. */
+}
+
+/*
+ * Handle hitting a breakpoint.
+ */
+void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	info.si_signo = SIGTRAP;
+	info.si_errno = 0;
+	info.si_code  = TRAP_BRKPT;
+	info.si_addr  = (void __user *)instruction_pointer(regs);
+
+	force_sig_info(SIGTRAP, &info, tsk);
+}
+
+static int break_trap(struct pt_regs *regs, unsigned int instr)
+{
+	ptrace_break(current, regs);
+	return 0;
+}
+
+static struct undef_hook arm_break_hook = {
+	.instr_mask	= 0x0fffffff,
+	.instr_val	= 0x07f001f0,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= 0,
+	.fn		= break_trap,
+};
+
+static struct undef_hook thumb_break_hook = {
+	.instr_mask	= 0xffff,
+	.instr_val	= 0xde01,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= PSR_T_BIT,
+	.fn		= break_trap,
+};
+
+static struct undef_hook thumb2_break_hook = {
+	.instr_mask	= 0xffffffff,
+	.instr_val	= 0xf7f0a000,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= PSR_T_BIT,
+	.fn		= break_trap,
+};
+
+static int __init ptrace_break_init(void)
+{
+	register_undef_hook(&arm_break_hook);
+	register_undef_hook(&thumb_break_hook);
+	register_undef_hook(&thumb2_break_hook);
+	return 0;
+}
+
+core_initcall(ptrace_break_init);
+
+/*
+ * Read the word at offset "off" into the "struct user".  We
+ * actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
+			    unsigned long __user *ret)
+{
+	unsigned long tmp;
+
+	if (off & 3)
+		return -EIO;
+
+	tmp = 0;
+	if (off == PT_TEXT_ADDR)
+		tmp = tsk->mm->start_code;
+	else if (off == PT_DATA_ADDR)
+		tmp = tsk->mm->start_data;
+	else if (off == PT_TEXT_END_ADDR)
+		tmp = tsk->mm->end_code;
+	else if (off < sizeof(struct pt_regs))
+		tmp = get_user_reg(tsk, off >> 2);
+	else if (off >= sizeof(struct user))
+		return -EIO;
+
+	return put_user(tmp, ret);
+}
+
+/*
+ * Write the word at offset "off" into "struct user".  We
+ * actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
+			     unsigned long val)
+{
+	if (off & 3 || off >= sizeof(struct user))
+		return -EIO;
+
+	if (off >= sizeof(struct pt_regs))
+		return 0;
+
+	return put_user_reg(tsk, off >> 2, val);
+}
+
+#ifdef CONFIG_IWMMXT
+
+/*
+ * Get the child iWMMXt state.
+ */
+static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
+{
+	struct thread_info *thread = task_thread_info(tsk);
+
+	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
+		return -ENODATA;
+	iwmmxt_task_disable(thread);  /* force it to ram */
+	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
+		? -EFAULT : 0;
+}
+
+/*
+ * Set the child iWMMXt state.
+ */
+static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
+{
+	struct thread_info *thread = task_thread_info(tsk);
+
+	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
+		return -EACCES;
+	iwmmxt_task_release(thread);  /* force a reload */
+	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
+		? -EFAULT : 0;
+}
+
+#endif
+
+#ifdef CONFIG_CRUNCH
+/*
+ * Get the child Crunch state.
+ */
+static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+	struct thread_info *thread = task_thread_info(tsk);
+
+	crunch_task_disable(thread);  /* force it to ram */
+	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
+		? -EFAULT : 0;
+}
+
+/*
+ * Set the child Crunch state.
+ */
+static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+	struct thread_info *thread = task_thread_info(tsk);
+
+	crunch_task_release(thread);  /* force a reload */
+	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
+		? -EFAULT : 0;
+}
+#endif
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+/*
+ * Convert a virtual register number into an index for a thread_info
+ * breakpoint array. Breakpoints are identified using positive numbers
+ * whilst watchpoints are negative. The registers are laid out as pairs
+ * of (address, control), each pair mapping to a unique hw_breakpoint struct.
+ * Register 0 is reserved for describing resource information.
+ */
+static int ptrace_hbp_num_to_idx(long num)
+{
+	if (num < 0)
+		num = (ARM_MAX_BRP << 1) - num;
+	return (num - 1) >> 1;
+}
+
+/*
+ * Returns the virtual register number for the address of the
+ * breakpoint at index idx.
+ */
+static long ptrace_hbp_idx_to_num(int idx)
+{
+	long mid = ARM_MAX_BRP << 1;
+	long num = (idx << 1) + 1;
+	return num > mid ? mid - num : num;
+}
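+
+/*
+ * Worked example of the numbering above (with B = ARM_MAX_BRP
+ * breakpoint slots): registers 1 and 2 are the address and control of
+ * breakpoint slot 0, while registers -1 and -2 are the address and
+ * control of the first watchpoint slot (index B). Register 0 stays
+ * reserved for the resource information word.
+ */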
+
+/*
+ * Handle hitting a HW-breakpoint.
+ */
+static void ptrace_hbptriggered(struct perf_event *bp,
+				     struct perf_sample_data *data,
+				     struct pt_regs *regs)
+{
+	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
+	long num;
+	int i;
+	siginfo_t info;
+
+	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
+		if (current->thread.debug.hbp[i] == bp)
+			break;
+
+	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
+
+	info.si_signo	= SIGTRAP;
+	info.si_errno	= (int)num;
+	info.si_code	= TRAP_HWBKPT;
+	info.si_addr	= (void __user *)(bkpt->trigger);
+
+	force_sig_info(SIGTRAP, &info, current);
+}
+
+/*
+ * Set ptrace breakpoint pointers to zero for this task.
+ * This is required in order to prevent child processes from unregistering
+ * breakpoints held by their parent.
+ */
+void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
+}
+
+/*
+ * Unregister breakpoints from this task and reset the pointers in
+ * the thread_struct.
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+	int i;
+	struct thread_struct *t = &tsk->thread;
+
+	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
+		if (t->debug.hbp[i]) {
+			unregister_hw_breakpoint(t->debug.hbp[i]);
+			t->debug.hbp[i] = NULL;
+		}
+	}
+}
+
+static u32 ptrace_get_hbp_resource_info(void)
+{
+	u8 num_brps, num_wrps, debug_arch, wp_len;
+	u32 reg = 0;
+
+	num_brps	= hw_breakpoint_slots(TYPE_INST);
+	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
+	debug_arch	= arch_get_debug_arch();
+	wp_len		= arch_get_max_wp_len();
+
+	reg		|= debug_arch;
+	reg		<<= 8;
+	reg		|= wp_len;
+	reg		<<= 8;
+	reg		|= num_wrps;
+	reg		<<= 8;
+	reg		|= num_brps;
+
+	return reg;
+}
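+
+/*
+ * The resulting resource word, reading the shifts above from the top:
+ * bits 31-24 = debug architecture, 23-16 = max watchpoint length,
+ * 15-8 = number of watchpoints, 7-0 = number of breakpoints.
+ */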
+
+static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
+{
+	struct perf_event_attr attr;
+
+	ptrace_breakpoint_init(&attr);
+
+	/* Initialise fields to sane defaults. */
+	attr.bp_addr	= 0;
+	attr.bp_len	= HW_BREAKPOINT_LEN_4;
+	attr.bp_type	= type;
+	attr.disabled	= 1;
+
+	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
+					   tsk);
+}
+
+static int ptrace_gethbpregs(struct task_struct *tsk, long num,
+			     unsigned long  __user *data)
+{
+	u32 reg;
+	int idx, ret = 0;
+	struct perf_event *bp;
+	struct arch_hw_breakpoint_ctrl arch_ctrl;
+
+	if (num == 0) {
+		reg = ptrace_get_hbp_resource_info();
+	} else {
+		idx = ptrace_hbp_num_to_idx(num);
+		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		bp = tsk->thread.debug.hbp[idx];
+		if (!bp) {
+			reg = 0;
+			goto put;
+		}
+
+		arch_ctrl = counter_arch_bp(bp)->ctrl;
+
+		/*
+		 * Fix up the len because we may have adjusted it
+		 * to compensate for an unaligned address.
+		 */
+		while (!(arch_ctrl.len & 0x1))
+			arch_ctrl.len >>= 1;
+
+		if (num & 0x1)
+			reg = bp->attr.bp_addr;
+		else
+			reg = encode_ctrl_reg(arch_ctrl);
+	}
+
+put:
+	if (put_user(reg, data))
+		ret = -EFAULT;
+
+out:
+	return ret;
+}
+
+static int ptrace_sethbpregs(struct task_struct *tsk, long num,
+			     unsigned long __user *data)
+{
+	int idx, gen_len, gen_type, implied_type, ret = 0;
+	u32 user_val;
+	struct perf_event *bp;
+	struct arch_hw_breakpoint_ctrl ctrl;
+	struct perf_event_attr attr;
+
+	if (num == 0)
+		goto out;
+	else if (num < 0)
+		implied_type = HW_BREAKPOINT_RW;
+	else
+		implied_type = HW_BREAKPOINT_X;
+
+	idx = ptrace_hbp_num_to_idx(num);
+	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (get_user(user_val, data)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	bp = tsk->thread.debug.hbp[idx];
+	if (!bp) {
+		bp = ptrace_hbp_create(tsk, implied_type);
+		if (IS_ERR(bp)) {
+			ret = PTR_ERR(bp);
+			goto out;
+		}
+		tsk->thread.debug.hbp[idx] = bp;
+	}
+
+	attr = bp->attr;
+
+	if (num & 0x1) {
+		/* Address */
+		attr.bp_addr	= user_val;
+	} else {
+		/* Control */
+		decode_ctrl_reg(user_val, &ctrl);
+		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
+		if (ret)
+			goto out;
+
+		if ((gen_type & implied_type) != gen_type) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		attr.bp_len	= gen_len;
+		attr.bp_type	= gen_type;
+		attr.disabled	= !ctrl.enabled;
+	}
+
+	ret = modify_user_hw_breakpoint(bp, &attr);
+out:
+	return ret;
+}
+#endif
+
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   void *kbuf, void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				   regs,
+				   0, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	struct pt_regs newregs;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &newregs,
+				 0, sizeof(newregs));
+	if (ret)
+		return ret;
+
+	if (!valid_user_regs(&newregs))
+		return -EINVAL;
+
+	*task_pt_regs(target) = newregs;
+	return 0;
+}
+
+static int fpa_get(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   void *kbuf, void __user *ubuf)
+{
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				   &task_thread_info(target)->fpstate,
+				   0, sizeof(struct user_fp));
+}
+
+static int fpa_set(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	struct thread_info *thread = task_thread_info(target);
+
+	thread->used_cp[1] = thread->used_cp[2] = 1;
+
+	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+		&thread->fpstate,
+		0, sizeof(struct user_fp));
+}
+
+#ifdef CONFIG_VFP
+/*
+ * VFP register get/set implementations.
+ *
+ * With respect to the kernel, struct user_vfp is divided into three chunks:
+ * 16 or 32 real VFP registers (d0-d15 or d0-d31)
+ *	These are transferred to/from the real registers in the task's
+ *	vfp_hard_struct.  The number of registers depends on the kernel
+ *	configuration.
+ *
+ * 16 or 0 fake VFP registers (d16-d31 or empty)
+ *	i.e., the user_vfp structure has space for 32 registers even if
+ *	the kernel doesn't have them all.
+ *
+ *	vfp_get() reads this chunk as zero where applicable
+ *	vfp_set() ignores this chunk
+ *
+ * 1 word for the FPSCR
+ *
+ * The bounds-checking logic built into user_regset_copyout and friends
+ * means that we can make a simple sequence of calls to map the relevant data
+ * to/from the specified slice of the user regset structure.
+ */
+static int vfp_get(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   void *kbuf, void __user *ubuf)
+{
+	int ret;
+	struct thread_info *thread = task_thread_info(target);
+	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
+	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
+	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
+
+	vfp_sync_hwstate(thread);
+
+	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  &vfp->fpregs,
+				  user_fpregs_offset,
+				  user_fpregs_offset + sizeof(vfp->fpregs));
+	if (ret)
+		return ret;
+
+	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+				       user_fpregs_offset + sizeof(vfp->fpregs),
+				       user_fpscr_offset);
+	if (ret)
+		return ret;
+
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				   &vfp->fpscr,
+				   user_fpscr_offset,
+				   user_fpscr_offset + sizeof(vfp->fpscr));
+}
+
+/*
+ * For vfp_set() a read-modify-write is done on the VFP registers,
+ * in order to avoid writing back a half-modified set of registers on
+ * failure.
+ */
+static int vfp_set(struct task_struct *target,
+			  const struct user_regset *regset,
+			  unsigned int pos, unsigned int count,
+			  const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	struct thread_info *thread = task_thread_info(target);
+	struct vfp_hard_struct new_vfp;
+	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
+	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
+
+	vfp_sync_hwstate(thread);
+	new_vfp = thread->vfpstate.hard;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				  &new_vfp.fpregs,
+				  user_fpregs_offset,
+				  user_fpregs_offset + sizeof(new_vfp.fpregs));
+	if (ret)
+		return ret;
+
+	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+				user_fpregs_offset + sizeof(new_vfp.fpregs),
+				user_fpscr_offset);
+	if (ret)
+		return ret;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				 &new_vfp.fpscr,
+				 user_fpscr_offset,
+				 user_fpscr_offset + sizeof(new_vfp.fpscr));
+	if (ret)
+		return ret;
+
+	vfp_flush_hwstate(thread);
+	thread->vfpstate.hard = new_vfp;
+
+	return 0;
+}
+#endif /* CONFIG_VFP */
+
+enum arm_regset {
+	REGSET_GPR,
+	REGSET_FPR,
+#ifdef CONFIG_VFP
+	REGSET_VFP,
+#endif
+};
+
+static const struct user_regset arm_regsets[] = {
+	[REGSET_GPR] = {
+		.core_note_type = NT_PRSTATUS,
+		.n = ELF_NGREG,
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = gpr_get,
+		.set = gpr_set
+	},
+	[REGSET_FPR] = {
+		/*
+		 * For the FPA regs in fpstate, the real fields are a mixture
+		 * of sizes, so pretend that the registers are word-sized:
+		 */
+		.core_note_type = NT_PRFPREG,
+		.n = sizeof(struct user_fp) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = fpa_get,
+		.set = fpa_set
+	},
+#ifdef CONFIG_VFP
+	[REGSET_VFP] = {
+		/*
+		 * Pretend that the VFP regs are word-sized, since the FPSCR is
+		 * a single word dangling at the end of struct user_vfp:
+		 */
+		.core_note_type = NT_ARM_VFP,
+		.n = ARM_VFPREGS_SIZE / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = vfp_get,
+		.set = vfp_set
+	},
+#endif /* CONFIG_VFP */
+};
+
+static const struct user_regset_view user_arm_view = {
+	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
+	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	return &user_arm_view;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
+{
+	int ret;
+	unsigned long __user *datap = (unsigned long __user *) data;
+
+	switch (request) {
+		case PTRACE_PEEKUSR:
+			ret = ptrace_read_user(child, addr, datap);
+			break;
+
+		case PTRACE_POKEUSR:
+			ret = ptrace_write_user(child, addr, data);
+			break;
+
+		case PTRACE_GETREGS:
+			ret = copy_regset_to_user(child,
+						  &user_arm_view, REGSET_GPR,
+						  0, sizeof(struct pt_regs),
+						  datap);
+			break;
+
+		case PTRACE_SETREGS:
+			ret = copy_regset_from_user(child,
+						    &user_arm_view, REGSET_GPR,
+						    0, sizeof(struct pt_regs),
+						    datap);
+			break;
+
+		case PTRACE_GETFPREGS:
+			ret = copy_regset_to_user(child,
+						  &user_arm_view, REGSET_FPR,
+						  0, sizeof(union fp_state),
+						  datap);
+			break;
+
+		case PTRACE_SETFPREGS:
+			ret = copy_regset_from_user(child,
+						    &user_arm_view, REGSET_FPR,
+						    0, sizeof(union fp_state),
+						    datap);
+			break;
+
+#ifdef CONFIG_IWMMXT
+		case PTRACE_GETWMMXREGS:
+			ret = ptrace_getwmmxregs(child, datap);
+			break;
+
+		case PTRACE_SETWMMXREGS:
+			ret = ptrace_setwmmxregs(child, datap);
+			break;
+#endif
+
+		case PTRACE_GET_THREAD_AREA:
+			/*Fix for HUB: CVE-2014-9870*/
+			ret = put_user(task_thread_info(child)->tp_value[0],
+				       datap);
+			break;
+
+		case PTRACE_SET_SYSCALL:
+			task_thread_info(child)->syscall = data;
+			ret = 0;
+			break;
+
+#ifdef CONFIG_CRUNCH
+		case PTRACE_GETCRUNCHREGS:
+			ret = ptrace_getcrunchregs(child, datap);
+			break;
+
+		case PTRACE_SETCRUNCHREGS:
+			ret = ptrace_setcrunchregs(child, datap);
+			break;
+#endif
+
+#ifdef CONFIG_VFP
+		case PTRACE_GETVFPREGS:
+			ret = copy_regset_to_user(child,
+						  &user_arm_view, REGSET_VFP,
+						  0, ARM_VFPREGS_SIZE,
+						  datap);
+			break;
+
+		case PTRACE_SETVFPREGS:
+			ret = copy_regset_from_user(child,
+						    &user_arm_view, REGSET_VFP,
+						    0, ARM_VFPREGS_SIZE,
+						    datap);
+			break;
+#endif
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+		case PTRACE_GETHBPREGS:
+			if (ptrace_get_breakpoints(child) < 0)
+				return -ESRCH;
+
+			ret = ptrace_gethbpregs(child, addr,
+						(unsigned long __user *)data);
+			ptrace_put_breakpoints(child);
+			break;
+		case PTRACE_SETHBPREGS:
+			if (ptrace_get_breakpoints(child) < 0)
+				return -ESRCH;
+
+			ret = ptrace_sethbpregs(child, addr,
+						(unsigned long __user *)data);
+			ptrace_put_breakpoints(child);
+			break;
+#endif
+
+		default:
+			ret = ptrace_request(child, request, addr, data);
+			break;
+	}
+
+	return ret;
+}
+
+asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
+{
+	unsigned long ip;
+
+	if (why)
+		audit_syscall_exit(regs);
+	else
+		audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
+				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+
+	if (!test_thread_flag(TIF_SYSCALL_TRACE))
+		return scno;
+	if (!(current->ptrace & PT_PTRACED))
+		return scno;
+
+	current_thread_info()->syscall = scno;
+
+	/*
+	 * IP is used to denote syscall entry/exit:
+	 * IP = 0 -> entry, =1 -> exit
+	 */
+	ip = regs->ARM_ip;
+	regs->ARM_ip = why;
+
+	/* the 0x80 provides a way for the tracing parent to distinguish
+	   between a syscall stop and SIGTRAP delivery */
+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+				 ? 0x80 : 0));
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+	regs->ARM_ip = ip;
+
+	return current_thread_info()->syscall;
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/relocate_kernel.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/relocate_kernel.S
new file mode 100644
index 0000000..d0cdedf
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/relocate_kernel.S
@@ -0,0 +1,88 @@
+/*
+ * relocate_kernel.S - put the kernel image in place to boot
+ */
+
+#include <asm/kexec.h>
+
+	.globl relocate_new_kernel
+relocate_new_kernel:
+
+	ldr	r0,kexec_indirection_page
+	ldr	r1,kexec_start_address
+
+	/*
+	 * If there is no indirection page (we are doing crashdumps)
+	 * skip any relocation.
+	 */
+	cmp	r0, #0
+	beq	2f
+
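+	/*
+	 * Each word read from the indirection page carries kimage flags
+	 * in its low bits, as tested below: bit 0 = destination page,
+	 * bit 1 = indirection page, bit 2 = done, bit 3 = source page
+	 * (copied 1024 words, i.e. one 4K page, at a time).
+	 */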
+0:	/* top: read another word from the indirection page */
+	ldr	r3, [r0],#4
+
+	/* Is it a destination page? If so, put the destination address in r4 */
+	tst	r3,#1,0
+	beq	1f
+	bic	r4,r3,#1
+	b	0b
+1:
+	/* Is it an indirection page? */
+	tst	r3,#2,0
+	beq	1f
+	bic	r0,r3,#2
+	b	0b
+1:
+
+	/* Are we done? */
+	tst	r3,#4,0
+	beq	1f
+	b	2f
+
+1:
+	/* Is it a source page? */
+	tst	r3,#8,0
+	beq	0b
+	bic	r3,r3,#8
+	mov	r6,#1024
+9:
+	ldr	r5,[r3],#4
+	str	r5,[r4],#4
+	subs	r6,r6,#1
+	bne	9b
+	b	0b
+
+2:
+	/* Jump to relocated kernel */
+	mov	lr,r1
+	mov	r0,#0
+	ldr	r1,kexec_mach_type
+	ldr	r2,kexec_boot_atags
+ ARM(	mov pc, lr	)
+ THUMB(	bx lr		)
+
+	.align
+
+	.globl kexec_start_address
+kexec_start_address:
+	.long	0x0
+
+	.globl kexec_indirection_page
+kexec_indirection_page:
+	.long	0x0
+
+	.globl kexec_mach_type
+kexec_mach_type:
+	.long	0x0
+
+	/* phy addr of the atags for the new kernel */
+	.globl kexec_boot_atags
+kexec_boot_atags:
+	.long	0x0
+
+relocate_new_kernel_end:
+
+	.globl relocate_new_kernel_size
+relocate_new_kernel_size:
+	.long relocate_new_kernel_end - relocate_new_kernel
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/return_address.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/return_address.c
new file mode 100644
index 0000000..8085417
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/return_address.c
@@ -0,0 +1,72 @@
+/*
+ * arch/arm/kernel/return_address.c
+ *
+ * Copyright (C) 2009 Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ * for Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/export.h>
+#include <linux/ftrace.h>
+
+#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+#include <linux/sched.h>
+
+#include <asm/stacktrace.h>
+
+struct return_address_data {
+	unsigned int level;
+	void *addr;
+};
+
+static int save_return_addr(struct stackframe *frame, void *d)
+{
+	struct return_address_data *data = d;
+
+	if (!data->level) {
+		data->addr = (void *)frame->lr;
+
+		return 1;
+	} else {
+		--data->level;
+		return 0;
+	}
+}
+
+void *return_address(unsigned int level)
+{
+	struct return_address_data data;
+	struct stackframe frame;
+	register unsigned long current_sp asm ("sp");
+
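+	/* +1 so the walk skips the frame of return_address() itself */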
+	data.level = level + 1;
+
+	frame.fp = (unsigned long)__builtin_frame_address(0);
+	frame.sp = current_sp;
+	frame.lr = (unsigned long)__builtin_return_address(0);
+	frame.pc = (unsigned long)return_address;
+
+	walk_stackframe(&frame, save_return_addr, &data);
+
+	if (!data.level)
+		return data.addr;
+	else
+		return NULL;
+}
+
+#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
+
+#if defined(CONFIG_ARM_UNWIND)
+#warning "TODO: return_address should use unwind tables"
+#endif
+
+void *return_address(unsigned int level)
+{
+	return NULL;
+}
+
+#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
+
+EXPORT_SYMBOL_GPL(return_address);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/sched_clock.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/sched_clock.c
new file mode 100644
index 0000000..63bc22c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/sched_clock.c
@@ -0,0 +1,208 @@
+/*
+ * sched_clock.c: support for extending counters to full 64-bit ns counter
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/syscore_ops.h>
+#include <linux/timer.h>
+
+#include <asm/sched_clock.h>
+
+struct clock_data {
+	u64 epoch_ns;
+	u32 epoch_cyc;
+	u32 epoch_cyc_copy;
+	u32 mult;
+	u32 shift;
+	bool suspended;
+	bool needs_suspend;
+};
+
+static void sched_clock_poll(unsigned long wrap_ticks);
+static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
+
+static struct clock_data cd = {
+	.mult	= NSEC_PER_SEC / HZ,
+};
+
+static u32 __read_mostly sched_clock_mask = 0xffffffff;
+
+static u32 notrace jiffy_sched_clock_read(void)
+{
+	return (u32)(jiffies - INITIAL_JIFFIES);
+}
+
+static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+	return (cyc * mult) >> shift;
+}
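+
+/*
+ * Illustrative numbers (made up, not what clocks_calc_mult_shift()
+ * necessarily picks): for a 1MHz counter, mult = 1000 and shift = 0
+ * would give cyc_to_ns(5, 1000, 0) = 5000, i.e. 5 ticks = 5000ns.
+ */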
+
+static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+{
+	u64 epoch_ns;
+	u32 epoch_cyc;
+
+	if (cd.suspended)
+		return cd.epoch_ns;
+
+	/*
+	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
+	 * ensuring that we always write epoch_cyc, epoch_ns and
+	 * epoch_cyc_copy in strict order, and read them in strict order.
+	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+	 * the middle of an update, and we should repeat the load.
+	 */
+	do {
+		epoch_cyc = cd.epoch_cyc;
+		smp_rmb();
+		epoch_ns = cd.epoch_ns;
+		smp_rmb();
+	} while (epoch_cyc != cd.epoch_cyc_copy);
+
+	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+}
+
+/*
+ * Atomically update the sched_clock epoch.
+ */
+static void notrace update_sched_clock(void)
+{
+	unsigned long flags;
+	u32 cyc;
+	u64 ns;
+
+	cyc = read_sched_clock();
+	ns = cd.epoch_ns +
+		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+			  cd.mult, cd.shift);
+	/*
+	 * Write epoch_cyc and epoch_ns in a way that the update is
+	 * detectable in cyc_to_sched_clock().
+	 */
+	raw_local_irq_save(flags);
+	cd.epoch_cyc_copy = cyc;
+	smp_wmb();
+	cd.epoch_ns = ns;
+	smp_wmb();
+	cd.epoch_cyc = cyc;
+	raw_local_irq_restore(flags);
+}
+
+static void sched_clock_poll(unsigned long wrap_ticks)
+{
+	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
+	update_sched_clock();
+}
+
+void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate)
+{
+	setup_sched_clock(read, bits, rate);
+	cd.needs_suspend = true;
+}
+
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
+{
+	unsigned long r, w;
+	u64 res, wrap;
+	char r_unit;
+
+	BUG_ON(bits > 32);
+	WARN_ON(!irqs_disabled());
+	WARN_ON(read_sched_clock != jiffy_sched_clock_read);
+	read_sched_clock = read;
+	sched_clock_mask = (1 << bits) - 1;
+
+	/* calculate the mult/shift to convert counter ticks to ns. */
+	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
+
+	r = rate;
+	if (r >= 4000000) {
+		r /= 1000000;
+		r_unit = 'M';
+	} else if (r >= 1000) {
+		r /= 1000;
+		r_unit = 'k';
+	} else
+		r_unit = ' ';
+
+	/* calculate how many ns until we wrap */
+	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
+	do_div(wrap, NSEC_PER_MSEC);
+	w = wrap;
+
+	/* calculate the ns resolution of this counter */
+	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
+	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
+		bits, r, r_unit, res, w);
+
+	/*
+	 * Start the timer (at roughly 90% of the wrap period, see
+	 * w - (w / 10) below, so the epoch is refreshed before the
+	 * counter wraps) to keep sched_clock() properly updated, and
+	 * set the initial epoch.
+	 */
+	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
+	update_sched_clock();
+
+	/*
+	 * Ensure that sched_clock() starts off at 0ns
+	 */
+	cd.epoch_ns = 0;
+
+	pr_debug("Registered %pF as sched_clock source\n", read);
+}
+
+unsigned long long notrace sched_clock(void)
+{
+	u32 cyc = read_sched_clock();
+	return cyc_to_sched_clock(cyc, sched_clock_mask);
+}
+
+void __init sched_clock_postinit(void)
+{
+	/*
+	 * If no sched_clock function has been provided at this point,
+	 * make the jiffy-based clock the final one.
+	 */
+	if (read_sched_clock == jiffy_sched_clock_read)
+		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+
+	sched_clock_poll(sched_clock_timer.data);
+}
+
+static int sched_clock_suspend(void)
+{
+	sched_clock_poll(sched_clock_timer.data);
+	if (cd.needs_suspend)
+		cd.suspended = true;
+	return 0;
+}
+
+static void sched_clock_resume(void)
+{
+	if (cd.needs_suspend) {
+		cd.epoch_cyc = read_sched_clock();
+		cd.epoch_cyc_copy = cd.epoch_cyc;
+		cd.suspended = false;
+	}
+}
+
+static struct syscore_ops sched_clock_ops = {
+	.suspend = sched_clock_suspend,
+	.resume = sched_clock_resume,
+};
+
+static int __init sched_clock_syscore_init(void)
+{
+	register_syscore_ops(&sched_clock_ops);
+	return 0;
+}
+device_initcall(sched_clock_syscore_init);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/setup.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/setup.c
new file mode 100644
index 0000000..ebfac78
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/setup.c
@@ -0,0 +1,1132 @@
+/*
+ *  linux/arch/arm/kernel/setup.c
+ *
+ *  Copyright (C) 1995-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/utsname.h>
+#include <linux/initrd.h>
+#include <linux/console.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/screen_info.h>
+#include <linux/init.h>
+#include <linux/kexec.h>
+#include <linux/of_fdt.h>
+#include <linux/root_dev.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/memblock.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/sort.h>
+
+#include <asm/unified.h>
+#include <asm/cp15.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/elf.h>
+#include <asm/procinfo.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/smp_plat.h>
+#include <asm/mach-types.h>
+#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
+#include <asm/tlbflush.h>
+
+#include <asm/prom.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/time.h>
+#include <asm/system_info.h>
+#include <asm/system_misc.h>
+#include <asm/traps.h>
+#include <asm/unwind.h>
+#include <asm/memblock.h>
+
+#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
+#include "compat.h"
+#endif
+#include "atags.h"
+#include "tcm.h"
+
+#ifndef MEM_SIZE
+#define MEM_SIZE	(16*1024*1024)
+#endif
+
+#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
+char fpe_type[8];
+
+static int __init fpe_setup(char *line)
+{
+	memcpy(fpe_type, line, 8);
+	return 1;
+}
+
+__setup("fpe=", fpe_setup);
+#endif
+
+extern void paging_init(struct machine_desc *desc);
+extern void sanity_check_meminfo(void);
+extern void reboot_setup(char *str);
+
+unsigned int processor_id;
+EXPORT_SYMBOL(processor_id);
+unsigned int __machine_arch_type __read_mostly;
+EXPORT_SYMBOL(__machine_arch_type);
+unsigned int cacheid __read_mostly;
+EXPORT_SYMBOL(cacheid);
+
+unsigned int __atags_pointer __initdata;
+
+unsigned int system_rev;
+EXPORT_SYMBOL(system_rev);
+
+unsigned int system_serial_low;
+EXPORT_SYMBOL(system_serial_low);
+
+unsigned int system_serial_high;
+EXPORT_SYMBOL(system_serial_high);
+
+unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL(elf_hwcap);
+
+
+#ifdef MULTI_CPU
+struct processor processor __read_mostly;
+#endif
+#ifdef MULTI_TLB
+struct cpu_tlb_fns cpu_tlb __read_mostly;
+#endif
+#ifdef MULTI_USER
+struct cpu_user_fns cpu_user __read_mostly;
+#endif
+#ifdef MULTI_CACHE
+struct cpu_cache_fns cpu_cache __read_mostly;
+#endif
+#ifdef CONFIG_OUTER_CACHE
+struct outer_cache_fns outer_cache __read_mostly;
+EXPORT_SYMBOL(outer_cache);
+#endif
+
+/*
+ * Cached cpu_architecture() result for use by assembler code.
+ * C code should use the cpu_architecture() function instead of accessing this
+ * variable directly.
+ */
+int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
+
+struct stack {
+	u32 irq[3];
+	u32 abt[3];
+	u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
+char elf_platform[ELF_PLATFORM_SIZE];
+EXPORT_SYMBOL(elf_platform);
+
+static const char *cpu_name;
+static const char *machine_name;
+static char __initdata cmd_line[COMMAND_LINE_SIZE];
+struct machine_desc *machine_desc __initdata;
+
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
+#define ENDIANNESS ((char)endian_test.l)
+
+DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
+
+/*
+ * Standard memory resources
+ */
+static struct resource mem_res[] = {
+	{
+		.name = "Video RAM",
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_MEM
+	},
+	{
+		.name = "Kernel code",
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_MEM
+	},
+	{
+		.name = "Kernel data",
+		.start = 0,
+		.end = 0,
+		.flags = IORESOURCE_MEM
+	}
+};
+
+#define video_ram   mem_res[0]
+#define kernel_code mem_res[1]
+#define kernel_data mem_res[2]
+
+static struct resource io_res[] = {
+	{
+		.name = "reserved",
+		.start = 0x3bc,
+		.end = 0x3be,
+		.flags = IORESOURCE_IO | IORESOURCE_BUSY
+	},
+	{
+		.name = "reserved",
+		.start = 0x378,
+		.end = 0x37f,
+		.flags = IORESOURCE_IO | IORESOURCE_BUSY
+	},
+	{
+		.name = "reserved",
+		.start = 0x278,
+		.end = 0x27f,
+		.flags = IORESOURCE_IO | IORESOURCE_BUSY
+	}
+};
+
+#define lp0 io_res[0]
+#define lp1 io_res[1]
+#define lp2 io_res[2]
+
+static const char *proc_arch[] = {
+	"undefined/unknown",
+	"3",
+	"4",
+	"4T",
+	"5",
+	"5T",
+	"5TE",
+	"5TEJ",
+	"6TEJ",
+	"7",
+	"?(11)",
+	"?(12)",
+	"?(13)",
+	"?(14)",
+	"?(15)",
+	"?(16)",
+	"?(17)",
+};
+
+static int __get_cpu_architecture(void)
+{
+	int cpu_arch;
+
+	if ((read_cpuid_id() & 0x0008f000) == 0) {
+		cpu_arch = CPU_ARCH_UNKNOWN;
+	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
+		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
+	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
+		cpu_arch = (read_cpuid_id() >> 16) & 7;
+		if (cpu_arch)
+			cpu_arch += CPU_ARCH_ARMv3;
+	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+		unsigned int mmfr0;
+
+		/* Revised CPUID format. Read the Memory Model Feature
+		 * Register 0 and check for VMSAv7 or PMSAv7 */
+		asm("mrc	p15, 0, %0, c0, c1, 4"
+		    : "=r" (mmfr0));
+		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+		    (mmfr0 & 0x000000f0) >= 0x00000030)
+			cpu_arch = CPU_ARCH_ARMv7;
+		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
+			 (mmfr0 & 0x000000f0) == 0x00000020)
+			cpu_arch = CPU_ARCH_ARMv6;
+		else
+			cpu_arch = CPU_ARCH_UNKNOWN;
+	} else
+		cpu_arch = CPU_ARCH_UNKNOWN;
+
+	return cpu_arch;
+}
+
+int __pure cpu_architecture(void)
+{
+	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
+
+	return __cpu_architecture;
+}
+
+static int cpu_has_aliasing_icache(unsigned int arch)
+{
+	int aliasing_icache;
+	unsigned int id_reg, num_sets, line_size;
+
+	/* PIPT caches never alias. */
+	if (icache_is_pipt())
+		return 0;
+
+	/* arch specifies the register format */
+	switch (arch) {
+	case CPU_ARCH_ARMv7:
+		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
+		    : /* No output operands */
+		    : "r" (1));
+		isb();
+		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
+		    : "=r" (id_reg));
+		line_size = 4 << ((id_reg & 0x7) + 2);
+		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
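+		/*
+		 * If one cache way spans more than a page, the virtual index
+		 * uses bits above the page offset, so the I-cache can alias.
+		 */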
+		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
+		break;
+	case CPU_ARCH_ARMv6:
+		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
+		break;
+	default:
+		/* I-cache aliases will be handled by D-cache aliasing code */
+		aliasing_icache = 0;
+	}
+
+	return aliasing_icache;
+}
+
+static void __init cacheid_init(void)
+{
+	unsigned int cachetype = read_cpuid_cachetype();
+	unsigned int arch = cpu_architecture();
+
+	if (arch >= CPU_ARCH_ARMv6) {
+		if ((cachetype & (7 << 29)) == 4 << 29) {
+			/* ARMv7 register format */
+			arch = CPU_ARCH_ARMv7;
+			cacheid = CACHEID_VIPT_NONALIASING;
+			switch (cachetype & (3 << 14)) {
+			case (1 << 14):
+				cacheid |= CACHEID_ASID_TAGGED;
+				break;
+			case (3 << 14):
+				cacheid |= CACHEID_PIPT;
+				break;
+			}
+		} else {
+			arch = CPU_ARCH_ARMv6;
+			if (cachetype & (1 << 23))
+				cacheid = CACHEID_VIPT_ALIASING;
+			else
+				cacheid = CACHEID_VIPT_NONALIASING;
+		}
+		if (cpu_has_aliasing_icache(arch))
+			cacheid |= CACHEID_VIPT_I_ALIASING;
+	} else {
+		cacheid = CACHEID_VIVT;
+	}
+
+	printk("CPU: %s data cache, %s instruction cache\n",
+		cache_is_vivt() ? "VIVT" :
+		cache_is_vipt_aliasing() ? "VIPT aliasing" :
+		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
+		cache_is_vivt() ? "VIVT" :
+		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
+		icache_is_vipt_aliasing() ? "VIPT aliasing" :
+		icache_is_pipt() ? "PIPT" :
+		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
+}
+
+/*
+ * These functions re-use the assembly code in head.S, which
+ * already provides the required functionality.
+ */
+extern struct proc_info_list *lookup_processor_type(unsigned int);
+
+void __init early_print(const char *str, ...)
+{
+	extern void printascii(const char *);
+	char buf[256];
+	va_list ap;
+
+	va_start(ap, str);
+	vsnprintf(buf, sizeof(buf), str, ap);
+	va_end(ap);
+
+#ifdef CONFIG_DEBUG_LL
+	printascii(buf);
+#endif
+	printk("%s", buf);
+}
+
+static void __init feat_v6_fixup(void)
+{
+	int id = read_cpuid_id();
+
+	if ((id & 0xff0f0000) != 0x41070000)
+		return;
+
+	/*
+	 * HWCAP_TLS is available only on 1136 r1p0 and later,
+	 * see also kuser_get_tls_init.
+	 */
+	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
+		elf_hwcap &= ~HWCAP_TLS;
+}
+
+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init sets up the per-CPU stacks.
+ */
+void cpu_init(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct stack *stk = &stacks[cpu];
+
+	if (cpu >= NR_CPUS) {
+		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+		BUG();
+	}
+
+	cpu_proc_init();
+
+	/*
+	 * Define the placement constraint for the inline asm directive below.
+	 * In Thumb-2, msr with an immediate value is not allowed.
+	 */
+#ifdef CONFIG_THUMB2_KERNEL
+#define PLC	"r"
+#else
+#define PLC	"I"
+#endif
+
+	/*
+	 * Set up stacks for the re-entrant exception handlers: switch
+	 * into IRQ, ABT and UND mode in turn, point sp at the per-mode
+	 * stack, then drop back to SVC mode.
+	 */
+	__asm__ (
+	"msr	cpsr_c, %1\n\t"
+	"add	r14, %0, %2\n\t"
+	"mov	sp, r14\n\t"
+	"msr	cpsr_c, %3\n\t"
+	"add	r14, %0, %4\n\t"
+	"mov	sp, r14\n\t"
+	"msr	cpsr_c, %5\n\t"
+	"add	r14, %0, %6\n\t"
+	"mov	sp, r14\n\t"
+	"msr	cpsr_c, %7"
+	    :
+	    : "r" (stk),
+	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+	      "I" (offsetof(struct stack, irq[0])),
+	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+	      "I" (offsetof(struct stack, abt[0])),
+	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+	      "I" (offsetof(struct stack, und[0])),
+	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+	    : "r14");
+}
+
+int __cpu_logical_map[NR_CPUS];
+
+void __init smp_setup_processor_id(void)
+{
+	int i;
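+	/* MPIDR bits [7:0] give this core's affinity level 0 number. */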
+	u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
+
+	cpu_logical_map(0) = cpu;
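+	/* Make the booting core logical CPU 0 by swapping its slot with 0. */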
+	for (i = 1; i < NR_CPUS; ++i)
+		cpu_logical_map(i) = i == cpu ? 0 : i;
+
+	printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
+}
+
+static void __init setup_processor(void)
+{
+	struct proc_info_list *list;
+
+	/*
+	 * locate processor in the list of supported processor
+	 * types.  The linker builds this table for us from the
+	 * entries in arch/arm/mm/proc-*.S
+	 */
+	list = lookup_processor_type(read_cpuid_id());
+	if (!list) {
+		printk("CPU configuration botched (ID %08x), unable "
+		       "to continue.\n", read_cpuid_id());
+		while (1);
+	}
+
+	cpu_name = list->cpu_name;
+	__cpu_architecture = __get_cpu_architecture();
+
+#ifdef MULTI_CPU
+	processor = *list->proc;
+#endif
+#ifdef MULTI_TLB
+	cpu_tlb = *list->tlb;
+#endif
+#ifdef MULTI_USER
+	cpu_user = *list->user;
+#endif
+#ifdef MULTI_CACHE
+	cpu_cache = *list->cache;
+#endif
+
+	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+	       proc_arch[cpu_architecture()], cr_alignment);
+
+	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
+		 list->arch_name, ENDIANNESS);
+	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
+		 list->elf_name, ENDIANNESS);
+	elf_hwcap = list->elf_hwcap;
+#ifndef CONFIG_ARM_THUMB
+	elf_hwcap &= ~HWCAP_THUMB;
+#endif
+
+	feat_v6_fixup();
+
+	cacheid_init();
+	cpu_init();
+}
+
+void __init dump_machine_table(void)
+{
+	struct machine_desc *p;
+
+	early_print("Available machine support:\n\nID (hex)\tNAME\n");
+	for_each_machine_desc(p)
+		early_print("%08x\t%s\n", p->nr, p->name);
+
+	early_print("\nPlease check your kernel config and/or bootloader.\n");
+
+	while (true)
+		/* can't use cpu_relax() here as it may require MMU setup */;
+}
+
+int __init arm_add_memory(phys_addr_t start, unsigned long size)
+{
+	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+
+	if (meminfo.nr_banks >= NR_BANKS) {
+		printk(KERN_CRIT "NR_BANKS too low, "
+			"ignoring memory at 0x%08llx\n", (long long)start);
+		return -EINVAL;
+	}
+
+	/*
+	 * Ensure that start/size are aligned to a page boundary.
+	 * Size is appropriately rounded down, start is rounded up.
+	 */
+	size -= start & ~PAGE_MASK;
+	bank->start = PAGE_ALIGN(start);
+
+#ifndef CONFIG_LPAE
+	if (bank->start + size < bank->start) {
+		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
+			"32-bit physical address space\n", (long long)start);
+		/*
+		 * To ensure bank->start + bank->size is representable in
+		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
+		 * This means we lose a page after masking.
+		 */
+		size = ULONG_MAX - bank->start;
+	}
+#endif
+
+	bank->size = size & PAGE_MASK;
+
+	/*
+	 * Check whether this memory region has non-zero size.
+	 */
+	if (bank->size == 0)
+		return -EINVAL;
+
+	meminfo.nr_banks++;
+	return 0;
+}
+
+/*
+ * Pick out the memory size.  We look for mem=size@start,
+ * where start and size are "size[KkMm]"
+ */
+static int __init early_mem(char *p)
+{
+	static int usermem __initdata = 0;
+	unsigned long size;
+	phys_addr_t start;
+	char *endp;
+
+	/*
+	 * If the user specifies memory size, we
+	 * blow away any automatically generated
+	 * size.
+	 */
+	if (usermem == 0) {
+		usermem = 1;
+		meminfo.nr_banks = 0;
+	}
+
+	start = PHYS_OFFSET;
+	size  = memparse(p, &endp);
+	if (*endp == '@')
+		start = memparse(endp + 1, NULL);
+
+	arm_add_memory(start, size);
+
+	return 0;
+}
+early_param("mem", early_mem);
+
+static void __init
+setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
+{
+#ifdef CONFIG_BLK_DEV_RAM
+	extern int rd_size, rd_image_start, rd_prompt, rd_doload;
+
+	rd_image_start = image_start;
+	rd_prompt = prompt;
+	rd_doload = doload;
+
+	if (rd_sz)
+		rd_size = rd_sz;
+#endif
+}
+
+static void __init request_standard_resources(struct machine_desc *mdesc)
+{
+	struct memblock_region *region;
+	struct resource *res;
+
+	kernel_code.start   = virt_to_phys(_text);
+	kernel_code.end     = virt_to_phys(_etext - 1);
+	kernel_data.start   = virt_to_phys(_sdata);
+	kernel_data.end     = virt_to_phys(_end - 1);
+
+	for_each_memblock(memory, region) {
+		res = alloc_bootmem_low(sizeof(*res));
+		res->name  = "System RAM";
+		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+		request_resource(&iomem_resource, res);
+
+		if (kernel_code.start >= res->start &&
+		    kernel_code.end <= res->end)
+			request_resource(res, &kernel_code);
+		if (kernel_data.start >= res->start &&
+		    kernel_data.end <= res->end)
+			request_resource(res, &kernel_data);
+	}
+
+	if (mdesc->video_start) {
+		video_ram.start = mdesc->video_start;
+		video_ram.end   = mdesc->video_end;
+		request_resource(&iomem_resource, &video_ram);
+	}
+
+	/*
+	 * Some machines can never possess lp0, lp1 or lp2.
+	 */
+	if (mdesc->reserve_lp0)
+		request_resource(&ioport_resource, &lp0);
+	if (mdesc->reserve_lp1)
+		request_resource(&ioport_resource, &lp1);
+	if (mdesc->reserve_lp2)
+		request_resource(&ioport_resource, &lp2);
+}
+
+/*
+ *  Tag parsing.
+ *
+ * This is the new way of passing data to the kernel at boot time.  Rather
+ * than passing a fixed inflexible structure to the kernel, we pass a list
+ * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
+ * tag for the list to be recognised (to distinguish the tagged list from
+ * a param_struct).  The list is terminated with a zero-length tag (this tag
+ * is not parsed in any way).
+ */
+static int __init parse_tag_core(const struct tag *tag)
+{
+	if (tag->hdr.size > 2) {
+		if ((tag->u.core.flags & 1) == 0)
+			root_mountflags &= ~MS_RDONLY;
+		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
+	}
+	return 0;
+}
+
+__tagtable(ATAG_CORE, parse_tag_core);
+
+static int __init parse_tag_mem32(const struct tag *tag)
+{
+	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
+}
+
+__tagtable(ATAG_MEM, parse_tag_mem32);
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+struct screen_info screen_info = {
+	.orig_video_lines	= 30,
+	.orig_video_cols	= 80,
+	.orig_video_mode	= 0,
+	.orig_video_ega_bx	= 0,
+	.orig_video_isVGA	= 1,
+	.orig_video_points	= 8
+};
+
+static int __init parse_tag_videotext(const struct tag *tag)
+{
+	screen_info.orig_x            = tag->u.videotext.x;
+	screen_info.orig_y            = tag->u.videotext.y;
+	screen_info.orig_video_page   = tag->u.videotext.video_page;
+	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
+	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
+	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
+	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
+	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
+	screen_info.orig_video_points = tag->u.videotext.video_points;
+	return 0;
+}
+
+__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
+#endif
+
+static int __init parse_tag_ramdisk(const struct tag *tag)
+{
+	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
+		      (tag->u.ramdisk.flags & 2) == 0,
+		      tag->u.ramdisk.start, tag->u.ramdisk.size);
+	return 0;
+}
+
+__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
+
+static int __init parse_tag_serialnr(const struct tag *tag)
+{
+	system_serial_low = tag->u.serialnr.low;
+	system_serial_high = tag->u.serialnr.high;
+	return 0;
+}
+
+__tagtable(ATAG_SERIAL, parse_tag_serialnr);
+
+static int __init parse_tag_revision(const struct tag *tag)
+{
+	system_rev = tag->u.revision.rev;
+	return 0;
+}
+
+__tagtable(ATAG_REVISION, parse_tag_revision);
+
+static int __init parse_tag_cmdline(const struct tag *tag)
+{
+#if defined(CONFIG_CMDLINE_EXTEND)
+	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
+	strlcat(default_command_line, tag->u.cmdline.cmdline,
+		COMMAND_LINE_SIZE);
+#elif defined(CONFIG_CMDLINE_FORCE)
+	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
+#else
+	strlcpy(default_command_line, tag->u.cmdline.cmdline,
+		COMMAND_LINE_SIZE);
+#endif
+	return 0;
+}
+
+__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
+
+/*
+ * Scan the tag table for this tag, and call its parse function.
+ * The tag table is built by the linker from all the __tagtable
+ * declarations.
+ */
+static int __init parse_tag(const struct tag *tag)
+{
+	extern struct tagtable __tagtable_begin, __tagtable_end;
+	struct tagtable *t;
+
+	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
+		if (tag->hdr.tag == t->tag) {
+			t->parse(tag);
+			break;
+		}
+
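+	/* True if the loop broke early, i.e. a matching handler was found. */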
+	return t < &__tagtable_end;
+}
+
+/*
+ * Parse all tags in the list, checking both the global and architecture
+ * specific tag tables.
+ */
+static void __init parse_tags(const struct tag *t)
+{
+	for (; t->hdr.size; t = tag_next(t))
+		if (!parse_tag(t))
+			printk(KERN_WARNING
+				"Ignoring unrecognised tag 0x%08x\n",
+				t->hdr.tag);
+}
+
+/*
+ * This holds our defaults.
+ */
+static struct init_tags {
+	struct tag_header hdr1;
+	struct tag_core   core;
+	struct tag_header hdr2;
+	struct tag_mem32  mem;
+	struct tag_header hdr3;
+} init_tags __initdata = {
+	{ tag_size(tag_core), ATAG_CORE },
+	{ 1, PAGE_SIZE, 0xff },
+	{ tag_size(tag_mem32), ATAG_MEM },
+	{ MEM_SIZE },
+	{ 0, ATAG_NONE }
+};
+
+static int __init customize_machine(void)
+{
+	/* customizes platform devices, or adds new ones */
+	if (machine_desc->init_machine)
+		machine_desc->init_machine();
+	return 0;
+}
+arch_initcall(customize_machine);
+
+#ifdef CONFIG_KEXEC
+static inline unsigned long long get_total_mem(void)
+{
+	unsigned long total;
+
+	total = max_low_pfn - min_low_pfn;
+	return total << PAGE_SHIFT;
+}
+
+/**
+ * reserve_crashkernel() - reserve a memory area for the crash kernel
+ *
+ * This function reserves the memory area given by the "crashkernel="
+ * kernel command line parameter.  The reserved memory is used by a
+ * dump-capture kernel when the primary kernel crashes.
+ */
+static void __init reserve_crashkernel(void)
+{
+	unsigned long long crash_size, crash_base;
+	unsigned long long total_mem;
+	int ret;
+
+	total_mem = get_total_mem();
+	ret = parse_crashkernel(boot_command_line, total_mem,
+				&crash_size, &crash_base);
+	if (ret)
+		return;
+
+	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
+	if (ret < 0) {
+		printk(KERN_WARNING "crashkernel reservation failed - "
+		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
+		return;
+	}
+
+	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
+	       "for crashkernel (System RAM: %ldMB)\n",
+	       (unsigned long)(crash_size >> 20),
+	       (unsigned long)(crash_base >> 20),
+	       (unsigned long)(total_mem >> 20));
+
+	crashk_res.start = crash_base;
+	crashk_res.end = crash_base + crash_size - 1;
+	insert_resource(&iomem_resource, &crashk_res);
+}
+#else
+static inline void reserve_crashkernel(void) {}
+#endif /* CONFIG_KEXEC */
+
+static void __init squash_mem_tags(struct tag *tag)
+{
+	for (; tag->hdr.size; tag = tag_next(tag))
+		if (tag->hdr.tag == ATAG_MEM)
+			tag->hdr.tag = ATAG_NONE;
+}
+
+static struct machine_desc * __init setup_machine_tags(unsigned int nr)
+{
+	struct tag *tags = (struct tag *)&init_tags;
+	struct machine_desc *mdesc = NULL, *p;
+	char *from = default_command_line;
+
+	init_tags.mem.start = PHYS_OFFSET;
+
+	/*
+	 * locate machine in the list of supported machines.
+	 */
+	for_each_machine_desc(p)
+		if (nr == p->nr) {
+			printk("Machine: %s\n", p->name);
+			mdesc = p;
+			break;
+		}
+
+	if (!mdesc) {
+		early_print("\nError: unrecognized/unsupported machine ID"
+			" (r1 = 0x%08x).\n\n", nr);
+		dump_machine_table(); /* does not return */
+	}
+
+	if (__atags_pointer)
+		tags = phys_to_virt(__atags_pointer);
+	else if (mdesc->atag_offset)
+		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
+
+#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
+	/*
+	 * If we have the old style parameters, convert them to
+	 * a tag list.
+	 */
+	if (tags->hdr.tag != ATAG_CORE)
+		convert_to_tag_list(tags);
+#endif
+
+	if (tags->hdr.tag != ATAG_CORE) {
+#if defined(CONFIG_OF)
+		/*
+		 * If CONFIG_OF is set, then assume this is a reasonably
+		 * modern system that should pass boot parameters
+		 */
+		early_print("Warning: Neither atags nor dtb found\n");
+#endif
+		tags = (struct tag *)&init_tags;
+	}
+
+	if (mdesc->fixup)
+		mdesc->fixup(tags, &from, &meminfo);
+
+	if (tags->hdr.tag == ATAG_CORE) {
+		if (meminfo.nr_banks != 0)
+			squash_mem_tags(tags);
+		save_atags(tags);
+		parse_tags(tags);
+	}
+
+	/* parse_early_param needs a boot_command_line */
+	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+
+	return mdesc;
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+	const struct membank *a = _a, *b = _b;
+	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+	struct machine_desc *mdesc;
+
+	setup_processor();
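+	/* Prefer a flattened device tree; fall back to the ATAG list. */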
+	mdesc = setup_machine_fdt(__atags_pointer);
+	if (!mdesc)
+		mdesc = setup_machine_tags(machine_arch_type);
+	machine_desc = mdesc;
+	machine_name = mdesc->name;
+
+#ifdef CONFIG_ZONE_DMA
+	if (mdesc->dma_zone_size) {
+		extern unsigned long arm_dma_zone_size;
+		arm_dma_zone_size = mdesc->dma_zone_size;
+	}
+#endif
+	if (mdesc->restart_mode)
+		reboot_setup(&mdesc->restart_mode);
+
+	init_mm.start_code = (unsigned long) _text;
+	init_mm.end_code   = (unsigned long) _etext;
+	init_mm.end_data   = (unsigned long) _edata;
+	init_mm.brk	   = (unsigned long) _end;
+
+	/* populate cmd_line too for later use, preserving boot_command_line */
+	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+	*cmdline_p = cmd_line;
+
+	parse_early_param();
+
+	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+	sanity_check_meminfo();
+	arm_memblock_init(&meminfo, mdesc);
+
+	paging_init(mdesc);
+	request_standard_resources(mdesc);
+
+	if (mdesc->restart)
+		arm_pm_restart = mdesc->restart;
+
+	unflatten_device_tree();
+
+#ifdef CONFIG_SMP
+	if (is_smp())
+		smp_init_cpus();
+#endif
+	reserve_crashkernel();
+
+	tcm_init();
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	handle_arch_irq = mdesc->handle_irq;
+#endif
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+	conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+	conswitchp = &dummy_con;
+#endif
+#endif
+
+	if (mdesc->init_early)
+		mdesc->init_early();
+}
+
+
+static int __init topology_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
+		cpuinfo->cpu.hotpluggable = 1;
+		register_cpu(&cpuinfo->cpu, cpu);
+	}
+
+	return 0;
+}
+subsys_initcall(topology_init);
+
+#ifdef CONFIG_HAVE_PROC_CPU
+static int __init proc_cpu_init(void)
+{
+	struct proc_dir_entry *res;
+
+	res = proc_mkdir("cpu", NULL);
+	if (!res)
+		return -ENOMEM;
+	return 0;
+}
+fs_initcall(proc_cpu_init);
+#endif
+
+static const char *hwcap_str[] = {
+	"swp",
+	"half",
+	"thumb",
+	"26bit",
+	"fastmult",
+	"fpa",
+	"vfp",
+	"edsp",
+	"java",
+	"iwmmxt",
+	"crunch",
+	"thumbee",
+	"neon",
+	"vfpv3",
+	"vfpv3d16",
+	"tls",
+	"vfpv4",
+	"idiva",
+	"idivt",
+	NULL
+};
+
+static int c_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+		   cpu_name, read_cpuid_id() & 15, elf_platform);
+
+#if defined(CONFIG_SMP)
+	for_each_online_cpu(i) {
+		/*
+		 * glibc reads /proc/cpuinfo to determine the number of
+		 * online processors, looking for lines beginning with
+		 * "processor".  Give glibc what it expects.
+		 */
+		seq_printf(m, "processor\t: %d\n", i);
+		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
+			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
+			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
+	}
+#else /* CONFIG_SMP */
+	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+		   loops_per_jiffy / (500000/HZ),
+		   (loops_per_jiffy / (5000/HZ)) % 100);
+#endif
+
+	/* dump out the processor features */
+	seq_puts(m, "Features\t: ");
+
+	for (i = 0; hwcap_str[i]; i++)
+		if (elf_hwcap & (1 << i))
+			seq_printf(m, "%s ", hwcap_str[i]);
+
+	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
+	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
+
+	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
+		/* pre-ARM7 */
+		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
+	} else {
+		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
+			/* ARM7 */
+			seq_printf(m, "CPU variant\t: 0x%02x\n",
+				   (read_cpuid_id() >> 16) & 127);
+		} else {
+			/* post-ARM7 */
+			seq_printf(m, "CPU variant\t: 0x%x\n",
+				   (read_cpuid_id() >> 20) & 15);
+		}
+		seq_printf(m, "CPU part\t: 0x%03x\n",
+			   (read_cpuid_id() >> 4) & 0xfff);
+	}
+	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+
+	seq_puts(m, "\n");
+
+	seq_printf(m, "Hardware\t: %s\n", machine_name);
+	seq_printf(m, "Revision\t: %04x\n", system_rev);
+	seq_printf(m, "Serial\t\t: %08x%08x\n",
+		   system_serial_high, system_serial_low);
+
+	return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= c_show
+};
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/signal.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/signal.c
new file mode 100644
index 0000000..51532cb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/signal.c
@@ -0,0 +1,747 @@
+/*
+ *  linux/arch/arm/kernel/signal.c
+ *
+ *  Copyright (C) 1995-2009 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/personality.h>
+#include <linux/freezer.h>
+#include <linux/uaccess.h>
+#include <linux/tracehook.h>
+
+#include <asm/elf.h>
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+#include <asm/unistd.h>
+#include <asm/vfp.h>
+
+#include "signal.h"
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ */
+#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RESTART		(0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
+
+/*
+ * With EABI, the syscall number has to be loaded into r7.
+ */
+#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
+#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
+
+/*
+ * For Thumb syscalls, we pass the syscall number via r7.  We therefore
+ * need two 16-bit instructions.
+ */
+#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
+#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
+
+const unsigned long sigreturn_codes[7] = {
+	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
+	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
+};
+
+/*
+ * Either we support OABI only, or we have EABI with the OABI
+ * compat layer enabled.  In the latter case we don't know if
+ * user space is EABI or not, and if not we must not clobber r7.
+ * Always using the OABI syscall solves that issue and works for
+ * all those cases.
+ */
+const unsigned long syscall_restart_code[2] = {
+	SWI_SYS_RESTART,	/* swi	__NR_restart_syscall */
+	0xe49df004,		/* ldr	pc, [sp], #4 */
+};
+
+/*
+ * atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
+{
+	sigset_t blocked;
+
+	current->saved_sigmask = current->blocked;
+
+	mask &= _BLOCKABLE;
+	siginitset(&blocked, mask);
+	set_current_blocked(&blocked);
+
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_restore_sigmask();
+	return -ERESTARTNOHAND;
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+	      struct old_sigaction __user *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	if (act) {
+		old_sigset_t mask;
+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+			return -EFAULT;
+		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		__get_user(mask, &act->sa_mask);
+		siginitset(&new_ka.sa.sa_mask, mask);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+			return -EFAULT;
+		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_CRUNCH
+static int preserve_crunch_context(struct crunch_sigframe __user *frame)
+{
+	char kbuf[sizeof(*frame) + 8];
+	struct crunch_sigframe *kframe;
+
+	/* the crunch context must be 64 bit aligned */
+	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	kframe->magic = CRUNCH_MAGIC;
+	kframe->size = CRUNCH_STORAGE_SIZE;
+	crunch_task_copy(current_thread_info(), &kframe->storage);
+	return __copy_to_user(frame, kframe, sizeof(*frame));
+}
+
+static int restore_crunch_context(struct crunch_sigframe __user *frame)
+{
+	char kbuf[sizeof(*frame) + 8];
+	struct crunch_sigframe *kframe;
+
+	/* the crunch context must be 64 bit aligned */
+	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	if (__copy_from_user(kframe, frame, sizeof(*frame)))
+		return -1;
+	if (kframe->magic != CRUNCH_MAGIC ||
+	    kframe->size != CRUNCH_STORAGE_SIZE)
+		return -1;
+	crunch_task_restore(current_thread_info(), &kframe->storage);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_IWMMXT
+
+static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
+{
+	char kbuf[sizeof(*frame) + 8];
+	struct iwmmxt_sigframe *kframe;
+
+	/* the iWMMXt context must be 64 bit aligned */
+	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	kframe->magic = IWMMXT_MAGIC;
+	kframe->size = IWMMXT_STORAGE_SIZE;
+	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
+	return __copy_to_user(frame, kframe, sizeof(*frame));
+}
+
+static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
+{
+	char kbuf[sizeof(*frame) + 8];
+	struct iwmmxt_sigframe *kframe;
+
+	/* the iWMMXt context must be 64 bit aligned */
+	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	if (__copy_from_user(kframe, frame, sizeof(*frame)))
+		return -1;
+	if (kframe->magic != IWMMXT_MAGIC ||
+	    kframe->size != IWMMXT_STORAGE_SIZE)
+		return -1;
+	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+	return 0;
+}
+
+#endif
+
+#ifdef CONFIG_VFP
+
+static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+{
+	const unsigned long magic = VFP_MAGIC;
+	const unsigned long size = VFP_STORAGE_SIZE;
+	int err = 0;
+
+	__put_user_error(magic, &frame->magic, err);
+	__put_user_error(size, &frame->size, err);
+
+	if (err)
+		return -EFAULT;
+
+	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+}
+
+static int restore_vfp_context(struct vfp_sigframe __user *frame)
+{
+	unsigned long magic;
+	unsigned long size;
+	int err = 0;
+
+	__get_user_error(magic, &frame->magic, err);
+	__get_user_error(size, &frame->size, err);
+
+	if (err)
+		return -EFAULT;
+	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+		return -EINVAL;
+
+	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
+}
+
+#endif
+
+/*
+ * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
+ */
+struct sigframe {
+	struct ucontext uc;
+	unsigned long retcode[2];
+};
+
+struct rt_sigframe {
+	struct siginfo info;
+	struct sigframe sig;
+};
+
+static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
+{
+	struct aux_sigframe __user *aux;
+	sigset_t set;
+	int err;
+
+	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+	if (err == 0) {
+		sigdelsetmask(&set, ~_BLOCKABLE);
+		set_current_blocked(&set);
+	}
+
+	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+
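+	/* Reject register state that would not be valid for user mode. */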
+	err |= !valid_user_regs(regs);
+
+	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+	if (err == 0)
+		err |= restore_crunch_context(&aux->crunch);
+#endif
+#ifdef CONFIG_IWMMXT
+	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
+		err |= restore_iwmmxt_context(&aux->iwmmxt);
+#endif
+#ifdef CONFIG_VFP
+	if (err == 0)
+		err |= restore_vfp_context(&aux->vfp);
+#endif
+
+	return err;
+}
+
+asmlinkage int sys_sigreturn(struct pt_regs *regs)
+{
+	struct sigframe __user *frame;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	/*
+	 * Since we stacked the signal on a 64-bit boundary,
+	 * 'sp' should be 64-bit aligned here.  If it's not,
+	 * the user is trying to mess with us.
+	 */
+	if (regs->ARM_sp & 7)
+		goto badframe;
+
+	frame = (struct sigframe __user *)regs->ARM_sp;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+		goto badframe;
+
+	if (restore_sigframe(regs, frame))
+		goto badframe;
+
+	return regs->ARM_r0;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	/*
+	 * Since we stacked the signal on a 64-bit boundary,
+	 * 'sp' should be 64-bit aligned here.  If it's not,
+	 * the user is trying to mess with us.
+	 */
+	if (regs->ARM_sp & 7)
+		goto badframe;
+
+	frame = (struct rt_sigframe __user *)regs->ARM_sp;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+		goto badframe;
+
+	if (restore_sigframe(regs, &frame->sig))
+		goto badframe;
+
+	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
+		goto badframe;
+
+	return regs->ARM_r0;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+static int
+setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
+{
+	struct aux_sigframe __user *aux;
+	int err = 0;
+
+	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+
+	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
+	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
+	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
+	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+
+	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+	if (err == 0)
+		err |= preserve_crunch_context(&aux->crunch);
+#endif
+#ifdef CONFIG_IWMMXT
+	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
+		err |= preserve_iwmmxt_context(&aux->iwmmxt);
+#endif
+#ifdef CONFIG_VFP
+	if (err == 0)
+		err |= preserve_vfp_context(&aux->vfp);
+#endif
+	__put_user_error(0, &aux->end_magic, err);
+
+	return err;
+}
+
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
+{
+	unsigned long sp = regs->ARM_sp;
+	void __user *frame;
+
+	/*
+	 * This is the X/Open sanctioned signal stack switching.
+	 */
+	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
+		sp = current->sas_ss_sp + current->sas_ss_size;
+
+	/*
+	 * ATPCS B01 mandates 8-byte alignment
+	 */
+	frame = (void __user *)((sp - framesize) & ~7);
+
+	/*
+	 * Check that we can actually write to the signal frame.
+	 */
+	if (!access_ok(VERIFY_WRITE, frame, framesize))
+		frame = NULL;
+
+	return frame;
+}
+
+static int
+setup_return(struct pt_regs *regs, struct k_sigaction *ka,
+	     unsigned long __user *rc, void __user *frame, int usig)
+{
+	unsigned long handler = (unsigned long)ka->sa.sa_handler;
+	unsigned long retcode;
+	int thumb = 0;
+	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+	cpsr |= PSR_ENDSTATE;
+
+	/*
+	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
+	 */
+	if (ka->sa.sa_flags & SA_THIRTYTWO)
+		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;
+
+#ifdef CONFIG_ARM_THUMB
+	if (elf_hwcap & HWCAP_THUMB) {
+		/*
+		 * The LSB of the handler determines if we're going to
+		 * be using THUMB or ARM mode for this signal handler.
+		 */
+		thumb = handler & 1;
+
+		if (thumb) {
+			cpsr |= PSR_T_BIT;
+#if __LINUX_ARM_ARCH__ >= 7
+			/* clear the If-Then Thumb-2 execution state */
+			cpsr &= ~PSR_IT_MASK;
+#endif
+		} else
+			cpsr &= ~PSR_T_BIT;
+	}
+#endif
+
+	if (ka->sa.sa_flags & SA_RESTORER) {
+		retcode = (unsigned long)ka->sa.sa_restorer;
+	} else {
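+		/*
+		 * Index into sigreturn_codes[]: 0 selects the ARM mov+swi
+		 * pair, 2 the packed Thumb pair; +3 switches to the
+		 * rt_sigreturn variants.
+		 */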
+		unsigned int idx = thumb << 1;
+
+		if (ka->sa.sa_flags & SA_SIGINFO)
+			idx += 3;
+
+		if (__put_user(sigreturn_codes[idx],   rc) ||
+		    __put_user(sigreturn_codes[idx+1], rc+1))
+			return 1;
+
+		if (cpsr & MODE32_BIT) {
+			/*
+			 * 32-bit code can use the new high-page
+			 * signal return code support.
+			 */
+			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
+		} else {
+			/*
+			 * Ensure that the instruction cache sees
+			 * the return code written onto the stack.
+			 */
+			flush_icache_range((unsigned long)rc,
+					   (unsigned long)(rc + 2));
+
+			retcode = ((unsigned long)rc) + thumb;
+		}
+	}
+
+	regs->ARM_r0 = usig;
+	regs->ARM_sp = (unsigned long)frame;
+	regs->ARM_lr = retcode;
+	regs->ARM_pc = handler;
+	regs->ARM_cpsr = cpsr;
+
+	return 0;
+}
+
+static int
+setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
+{
+	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
+	int err = 0;
+
+	if (!frame)
+		return 1;
+
+	/*
+	 * Set uc.uc_flags to a value which sc.trap_no would never have.
+	 */
+	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+
+	err |= setup_sigframe(frame, regs, set);
+	if (err == 0)
+		err = setup_return(regs, ka, frame->retcode, frame, usig);
+
+	return err;
+}
+
+static int
+setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
+	       sigset_t *set, struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
+	stack_t stack;
+	int err = 0;
+
+	if (!frame)
+		return 1;
+
+	err |= copy_siginfo_to_user(&frame->info, info);
+
+	__put_user_error(0, &frame->sig.uc.uc_flags, err);
+	__put_user_error(NULL, &frame->sig.uc.uc_link, err);
+
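+	/* Record the current signal stack so sigreturn can restore it. */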
+	memset(&stack, 0, sizeof(stack));
+	stack.ss_sp = (void __user *)current->sas_ss_sp;
+	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
+	stack.ss_size = current->sas_ss_size;
+	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
+
+	err |= setup_sigframe(&frame->sig, regs, set);
+	if (err == 0)
+		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);
+
+	if (err == 0) {
+		/*
+		 * For realtime signals we must also set the second and third
+		 * arguments for the signal handler.
+		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
+		 */
+		regs->ARM_r1 = (unsigned long)&frame->info;
+		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
+	}
+
+	return err;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static int
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+	      siginfo_t *info, sigset_t *oldset,
+	      struct pt_regs * regs)
+{
+	struct thread_info *thread = current_thread_info();
+	struct task_struct *tsk = current;
+	int usig = sig;
+	int ret;
+
+	/*
+	 * translate the signal
+	 */
+	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
+		usig = thread->exec_domain->signal_invmap[usig];
+
+	/*
+	 * Set up the stack frame
+	 */
+	if (ka->sa.sa_flags & SA_SIGINFO)
+		ret = setup_rt_frame(usig, ka, info, oldset, regs);
+	else
+		ret = setup_frame(usig, ka, oldset, regs);
+
+	/*
+	 * Check that the resulting registers are actually sane.
+	 */
+	ret |= !valid_user_regs(regs);
+
+	if (ret != 0) {
+		force_sigsegv(sig, tsk);
+		return ret;
+	}
+
+	/*
+	 * Block the signal if we were successful.
+	 */
+	block_sigmask(ka, sig);
+
+	return 0;
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs, int syscall)
+{
+	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
+	struct k_sigaction ka;
+	siginfo_t info;
+	int signr;
+
+	/*
+	 * We want the common case to go fast, which
+	 * is why we may in certain cases get here from
+	 * kernel mode. Just return without doing anything
+	 * if so.
+	 */
+	if (!user_mode(regs))
+		return;
+
+	local_irq_enable();
+	preempt_check_resched();
+
+	/*
+	 * If we were from a system call, check for system call restarting...
+	 */
+	if (syscall) {
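+		/*
+		 * The swi instruction we may restart is 2 bytes long in
+		 * Thumb mode and 4 bytes in ARM mode.
+		 */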
+		continue_addr = regs->ARM_pc;
+		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
+		retval = regs->ARM_r0;
+
+		/*
+		 * Prepare for system call restart.  We do this here so that a
+		 * debugger will see the already changed PSW.
+		 */
+		switch (retval) {
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
+			regs->ARM_r0 = regs->ARM_ORIG_r0;
+			regs->ARM_pc = restart_addr;
+			break;
+		case -ERESTART_RESTARTBLOCK:
+			regs->ARM_r0 = -EINTR;
+			break;
+		}
+	}
+#ifdef CONFIG_FREEZER
+	if (try_to_freeze_nowarn())
+#else
+	if (try_to_freeze())
+#endif
+		goto no_signal;
+
+	/*
+	 * Get the signal to deliver.  When running under ptrace, at this
+	 * point the debugger may change all our registers ...
+	 */
+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+	if (signr > 0) {
+		sigset_t *oldset;
+
+		/*
+		 * Depending on the signal settings we may need to revert the
+		 * decision to restart the system call.  But skip this if a
+		 * debugger has chosen to restart at a different PC.
+		 */
+		if (regs->ARM_pc == restart_addr) {
+			if (retval == -ERESTARTNOHAND
+			    || (retval == -ERESTARTSYS
+				&& !(ka.sa.sa_flags & SA_RESTART))) {
+				regs->ARM_r0 = -EINTR;
+				regs->ARM_pc = continue_addr;
+			}
+		}
+
+		if (test_thread_flag(TIF_RESTORE_SIGMASK))
+			oldset = &current->saved_sigmask;
+		else
+			oldset = &current->blocked;
+		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+			/*
+			 * A signal was successfully delivered; the saved
+			 * sigmask will have been stored in the signal frame,
+			 * and will be restored by sigreturn, so we can simply
+			 * clear the TIF_RESTORE_SIGMASK flag.
+			 */
+			if (test_thread_flag(TIF_RESTORE_SIGMASK))
+				clear_thread_flag(TIF_RESTORE_SIGMASK);
+		}
+		return;
+	}
+
+ no_signal:
+	if (syscall) {
+		/*
+		 * Handle restarting a different system call.  As above,
+		 * if a debugger has chosen to restart at a different PC,
+		 * ignore the restart.
+		 */
+		if (retval == -ERESTART_RESTARTBLOCK
+		    && regs->ARM_pc == continue_addr) {
+			if (thumb_mode(regs)) {
+				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
+				regs->ARM_pc -= 2;
+			} else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+				regs->ARM_r7 = __NR_restart_syscall;
+				regs->ARM_pc -= 4;
+#else
+				u32 __user *usp;
+
+				regs->ARM_sp -= 4;
+				usp = (u32 __user *)regs->ARM_sp;
+
+				if (put_user(regs->ARM_pc, usp) == 0) {
+					regs->ARM_pc = KERN_RESTART_CODE;
+				} else {
+					regs->ARM_sp += 4;
+					force_sigsegv(0, current);
+				}
+#endif
+			}
+		}
+
+		/* If there's no signal to deliver, we just put the saved sigmask
+		 * back.
+		 */
+		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+			clear_thread_flag(TIF_RESTORE_SIGMASK);
+			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+		}
+	}
+}
+
+asmlinkage void
+do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+{
+	if (thread_flags & _TIF_SIGPENDING)
+		do_signal(regs, syscall);
+
+	if (thread_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
+	}
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/signal.h b/ap/os/linux/linux-3.4.x/arch/arm/kernel/signal.h
new file mode 100644
index 0000000..6fcfe83
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/signal.h
@@ -0,0 +1,14 @@
+/*
+ *  linux/arch/arm/kernel/signal.h
+ *
+ *  Copyright (C) 2005-2009 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define KERN_SIGRETURN_CODE	(CONFIG_VECTORS_BASE + 0x00000500)
+#define KERN_RESTART_CODE	(KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
+
+extern const unsigned long sigreturn_codes[7];
+extern const unsigned long syscall_restart_code[2];
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/sleep.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/sleep.S
new file mode 100644
index 0000000..987dcf3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/sleep.S
@@ -0,0 +1,104 @@
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+	.text
+
+/*
+ * Save CPU state for a suspend.  This saves the CPU general purpose
+ * registers, and allocates space on the kernel stack to save the CPU
+ * specific registers and some other data for resume.
+ *  r0 = suspend function arg0
+ *  r1 = suspend function
+ */
+ENTRY(__cpu_suspend)
+	stmfd	sp!, {r4 - r11, lr}
+#ifdef MULTI_CPU
+	ldr	r10, =processor
+	ldr	r4, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+#else
+	ldr	r4, =cpu_suspend_size
+#endif
+	mov	r5, sp			@ current virtual SP
+	add	r4, r4, #12		@ Space for pgd, virt sp, phys resume fn
+	sub	sp, sp, r4		@ allocate CPU state on stack
+	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
+	add	r0, sp, #8		@ save pointer to save block
+	mov	r1, r4			@ size of save block
+	mov	r2, r5			@ virtual SP
+	ldr	r3, =sleep_save_sp
+#ifdef CONFIG_SMP
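+	@ index sleep_save_sp[] by this CPU's number from MPIDR[3:0]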
+	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+	ALT_UP(mov lr, #0)
+	and	lr, lr, #15
+	add	r3, r3, lr, lsl #2
+#endif
+	bl	__cpu_suspend_save
+	adr	lr, BSYM(cpu_suspend_abort)
+	ldmfd	sp!, {r0, pc}		@ call suspend fn
+ENDPROC(__cpu_suspend)
+	.ltorg
+
+cpu_suspend_abort:
+	ldmia	sp!, {r1 - r3}		@ pop phys pgd, virt SP, phys resume fn
+	teq	r0, #0
+	moveq	r0, #1			@ force non-zero value
+	mov	sp, r2
+	ldmfd	sp!, {r4 - r11, pc}
+ENDPROC(cpu_suspend_abort)
+
+/*
+ * r0 = control register value
+ */
+	.align	5
+	.pushsection	.idmap.text,"ax"
+ENTRY(cpu_resume_mmu)
+	ldr	r3, =cpu_resume_after_mmu
+	instr_sync
+	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
+	mrc	p15, 0, r0, c0, c0, 0	@ read id reg
+	instr_sync
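+	@ the two movs below are no-ops that pad the pipeline while the
+	@ MMU change takes effect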
+	mov	r0, r0
+	mov	r0, r0
+	mov	pc, r3			@ jump to virtual address
+ENDPROC(cpu_resume_mmu)
+	.popsection
+cpu_resume_after_mmu:
+	bl	cpu_init		@ restore the und/abt/irq banked regs
+	mov	r0, #0			@ return zero on success
+	ldmfd	sp!, {r4 - r11, pc}
+ENDPROC(cpu_resume_after_mmu)
+
+/*
+ * Note: Yes, part of the following code is located in the .data section.
+ *       This is to allow sleep_save_sp to be accessed with a relative load
+ *       while we can't rely on any MMU translation.  We could have put
+ *       sleep_save_sp in the .text section as well, but some setups might
+ *       insist on it being truly read-only.
+ */
+	.data
+	.align
+ENTRY(cpu_resume)
+#ifdef CONFIG_SMP
+	adr	r0, sleep_save_sp
+	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
+	ALT_UP(mov r1, #0)
+	and	r1, r1, #15
+	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
+#else
+	ldr	r0, sleep_save_sp	@ stack phys addr
+#endif
+	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
+	@ load phys pgd, stack, resume fn
+  ARM(	ldmia	r0!, {r1, sp, pc}	)
+THUMB(	ldmia	r0!, {r1, r2, r3}	)
+THUMB(	mov	sp, r2			)
+THUMB(	bx	r3			)
+ENDPROC(cpu_resume)
+
+sleep_save_sp:
+	.rept	CONFIG_NR_CPUS
+	.long	0				@ preserve stack phys ptr here
+	.endr
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp.c
new file mode 100644
index 0000000..22ad00a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp.c
@@ -0,0 +1,677 @@
+/*
+ *  linux/arch/arm/kernel/smp.c
+ *
+ *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cache.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
+#include <linux/completion.h>
+
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/idmap.h>
+#include <asm/topology.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+#include <asm/ptrace.h>
+#include <asm/localtimer.h>
+#include <asm/smp_plat.h>
+
+/*
+ * As of 2.5, kernels no longer have an init_tasks structure,
+ * so we need some other way of telling a new secondary core
+ * where to place its SVC stack.
+ */
+struct secondary_data secondary_data;
+
+enum ipi_msg_type {
+	IPI_TIMER = 2,
+	IPI_RESCHEDULE,
+	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
+	IPI_CPU_STOP,
+	IPI_CPU_BACKTRACE,
+};
+
+static DECLARE_COMPLETION(cpu_running);
+
+int __cpuinit __cpu_up(unsigned int cpu)
+{
+	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
+	struct task_struct *idle = ci->idle;
+	int ret;
+
+	/*
+	 * Spawn a new process manually, if not already done.
+	 * Grab a pointer to its task struct so we can mess with it
+	 */
+	if (!idle) {
+		idle = fork_idle(cpu);
+		if (IS_ERR(idle)) {
+			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+			return PTR_ERR(idle);
+		}
+		ci->idle = idle;
+	} else {
+		/*
+		 * Since this idle thread is being re-used, call
+		 * init_idle() to reinitialize the thread structure.
+		 */
+		init_idle(idle, cpu);
+	}
+
+	/*
+	 * We need to tell the secondary core where to find
+	 * its stack and the page tables.
+	 */
+	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
+	secondary_data.pgdir = virt_to_phys(idmap_pgd);
+	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
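+	/*
+	 * The secondary core starts with caches and the MMU off, so make
+	 * sure secondary_data reaches main memory where it can see it.
+	 */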
+	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
+	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
+
+	/*
+	 * Now bring the CPU into our world.
+	 */
+	ret = boot_secondary(cpu, idle);
+	if (ret == 0) {
+		/*
+		 * CPU was successfully started, wait for it
+		 * to come online or time out.
+		 */
+		wait_for_completion_timeout(&cpu_running,
+						 msecs_to_jiffies(1000));
+
+		if (!cpu_online(cpu)) {
+			pr_crit("CPU%u: failed to come online\n", cpu);
+			ret = -EIO;
+		}
+	} else {
+		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
+	}
+
+	secondary_data.stack = NULL;
+	secondary_data.pgdir = 0;
+
+	return ret;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void percpu_timer_stop(void);
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+	int ret;
+
+	ret = platform_cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Stop the local timer for this CPU.
+	 */
+	percpu_timer_stop();
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		if (p->mm)
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+	}
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+
+static DECLARE_COMPLETION(cpu_died);
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_err("CPU%u: cpu didn't die\n", cpu);
+		return;
+	}
+	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+
+	if (!platform_cpu_kill(cpu))
+		printk("CPU%u: unable to kill\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	idle_task_exit();
+
+	local_irq_disable();
+	mb();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	complete(&cpu_died);
+
+	/*
+	 * actual CPU shutdown procedure is at least platform (if not
+	 * CPU) specific.
+	 */
+	platform_cpu_die(cpu);
+
+	/*
+	 * Do not return to the idle loop - jump back to the secondary
+	 * cpu initialisation.  There's some initialisation which needs
+	 * to be repeated to undo the effects of taking the CPU offline.
+	 */
+	__asm__("mov	sp, %0\n"
+	"	mov	fp, #0\n"
+	"	b	secondary_start_kernel"
+		:
+		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
+
+	cpu_info->loops_per_jiffy = loops_per_jiffy;
+
+	store_cpu_topology(cpuid);
+}
+
+static void percpu_timer_setup(void);
+
+/*
+ * This is the secondary CPU boot entry.  We're using this CPU's
+ * idle thread stack, but a set of temporary page tables.
+ */
+asmlinkage void __cpuinit secondary_start_kernel(void)
+{
+	struct mm_struct *mm = &init_mm;
+	unsigned int cpu;
+
+	/*
+	 * The identity mapping is uncached (strongly ordered), so
+	 * switch away from it before attempting any exclusive accesses.
+	 */
+	cpu_switch_mm(mm->pgd, mm);
+	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
+
+	/*
+	 * All kernel threads share the same mm context; grab a
+	 * reference and switch to it.
+	 */
+	cpu = smp_processor_id();
+	atomic_inc(&mm->mm_count);
+	current->active_mm = mm;
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
+
+	printk("CPU%u: Booted secondary processor\n", cpu);
+
+	cpu_init();
+	preempt_disable();
+	trace_hardirqs_off();
+
+	/*
+	 * Give the platform a chance to do its own initialisation.
+	 */
+	platform_secondary_init(cpu);
+
+	notify_cpu_starting(cpu);
+
+	calibrate_delay();
+
+	smp_store_cpu_info(cpu);
+
+	/*
+	 * OK, now it's safe to let the boot CPU continue.  Wait for
+	 * the CPU migration code to notice that the CPU is online
+	 * before we continue - which happens after __cpu_up returns.
+	 */
+	set_cpu_online(cpu, true);
+	complete(&cpu_running);
+
+	/*
+	 * Setup the percpu timer for this CPU.
+	 */
+	percpu_timer_setup();
+
+	local_irq_enable();
+	local_fiq_enable();
+
+	/*
+	 * OK, it's off to the idle thread for us
+	 */
+	cpu_idle();
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	int cpu;
+	unsigned long bogosum = 0;
+
+	for_each_online_cpu(cpu)
+		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+	printk(KERN_INFO "SMP: Total of %d processors activated "
+	       "(%lu.%02lu BogoMIPS).\n",
+	       num_online_cpus(),
+	       bogosum / (500000/HZ),
+	       (bogosum / (5000/HZ)) % 100);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	per_cpu(cpu_data, cpu).idle = current;
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned int ncores = num_possible_cpus();
+
+	init_cpu_topology();
+
+	smp_store_cpu_info(smp_processor_id());
+
+	/*
+	 * are we trying to boot more cores than exist?
+	 */
+	if (max_cpus > ncores)
+		max_cpus = ncores;
+	if (ncores > 1 && max_cpus) {
+		/*
+		 * Enable the local timer or broadcast device for the
+		 * boot CPU, but only if we have more than one CPU.
+		 */
+		percpu_timer_setup();
+
+		/*
+		 * Initialise the present map, which describes the set of CPUs
+		 * actually populated at the present time. A platform should
+		 * re-initialize the map in platform_smp_prepare_cpus() if
+		 * present != possible (e.g. physical hotplug).
+		 */
+		init_cpu_present(cpu_possible_mask);
+
+		/*
+		 * Initialise the SCU if there is more than one CPU
+		 * and let them know where to start.
+		 */
+		platform_smp_prepare_cpus(max_cpus);
+	}
+}
+
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+
+void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
+{
+	smp_cross_call = fn;
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+static const char *ipi_types[NR_IPI] = {
+#define S(x,s)	[x - IPI_TIMER] = s
+	S(IPI_TIMER, "Timer broadcast interrupts"),
+	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
+	S(IPI_CALL_FUNC, "Function call interrupts"),
+	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
+	S(IPI_CPU_STOP, "CPU stop interrupts"),
+	S(IPI_CPU_BACKTRACE, "CPU backtrace"),
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+	unsigned int cpu, i;
+
+	for (i = 0; i < NR_IPI; i++) {
+		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+
+		for_each_present_cpu(cpu)
+			seq_printf(p, "%10u ",
+				   __get_irq_stat(cpu, ipi_irqs[i]));
+
+		seq_printf(p, " %s\n", ipi_types[i]);
+	}
+}
+
+u64 smp_irq_stat_cpu(unsigned int cpu)
+{
+	u64 sum = 0;
+	int i;
+
+	for (i = 0; i < NR_IPI; i++)
+		sum += __get_irq_stat(cpu, ipi_irqs[i]);
+
+	return sum;
+}
+
+/*
+ * Timer (local or broadcast) support
+ */
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+
+static void ipi_timer(void)
+{
+	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
+	evt->event_handler(evt);
+}
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+static void smp_timer_broadcast(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_TIMER);
+}
+#else
+#define smp_timer_broadcast	NULL
+#endif
+
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *evt)
+{
+}
+
+static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
+{
+	evt->name	= "dummy_timer";
+	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
+			  CLOCK_EVT_FEAT_PERIODIC |
+			  CLOCK_EVT_FEAT_DUMMY;
+	evt->rating	= 400;
+	evt->mult	= 1;
+	evt->set_mode	= broadcast_timer_set_mode;
+
+	clockevents_register_device(evt);
+}
+
+static struct local_timer_ops *lt_ops;
+
+#ifdef CONFIG_LOCAL_TIMERS
+int local_timer_register(struct local_timer_ops *ops)
+{
+	if (lt_ops)
+		return -EBUSY;
+
+	lt_ops = ops;
+	return 0;
+}
+#endif
+
+static void __cpuinit percpu_timer_setup(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->cpumask = cpumask_of(cpu);
+	evt->broadcast = smp_timer_broadcast;
+
+	if (!lt_ops || lt_ops->setup(evt))
+		broadcast_timer_setup(evt);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The generic clock events code purposely does not stop the local timer
+ * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
+ * manually here.
+ */
+static void percpu_timer_stop(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	if (lt_ops)
+		lt_ops->stop(evt);
+}
+#endif
+
+static DEFINE_RAW_SPINLOCK(stop_lock);
+
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(unsigned int cpu)
+{
+	if (system_state == SYSTEM_BOOTING ||
+	    system_state == SYSTEM_RUNNING) {
+		raw_spin_lock(&stop_lock);
+		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
+		dump_stack();
+		raw_spin_unlock(&stop_lock);
+	}
+
+	set_cpu_online(cpu, false);
+
+	local_fiq_disable();
+	local_irq_disable();
+
+	while (1)
+		cpu_relax();
+}
+
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+	unsigned int this_cpu = smp_processor_id();
+	int i;
+
+	if (test_and_set_bit(0, &backtrace_flag))
+		/*
+		 * If there is already a trigger_all_cpu_backtrace() in progress
+		 * (backtrace_flag == 1), don't output duplicate CPU dumps.
+		 */
+		return;
+
+	cpumask_copy(&backtrace_mask, cpu_online_mask);
+	cpu_clear(this_cpu, backtrace_mask);
+
+	pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+	dump_stack();
+
+	pr_info("\nsending IPI to all other CPUs:\n");
+	smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpumask_empty(&backtrace_mask))
+			break;
+		mdelay(1);
+	}
+
+	clear_bit(0, &backtrace_flag);
+	smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+	if (cpu_isset(cpu, backtrace_mask)) {
+		raw_spin_lock(&backtrace_lock);
+		pr_warning("IPI backtrace for cpu %d\n", cpu);
+		show_regs(regs);
+		raw_spin_unlock(&backtrace_lock);
+		cpu_clear(cpu, backtrace_mask);
+	}
+}
+
+/*
+ * Main handler for inter-processor interrupts
+ */
+asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
+{
+	handle_IPI(ipinr, regs);
+}
+
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+	unsigned int cpu = smp_processor_id();
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
+		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
+
+	switch (ipinr) {
+	case IPI_TIMER:
+		irq_enter();
+		ipi_timer();
+		irq_exit();
+		break;
+
+	case IPI_RESCHEDULE:
+		scheduler_ipi();
+		break;
+
+	case IPI_CALL_FUNC:
+		irq_enter();
+		generic_smp_call_function_interrupt();
+		irq_exit();
+		break;
+
+	case IPI_CALL_FUNC_SINGLE:
+		irq_enter();
+		generic_smp_call_function_single_interrupt();
+		irq_exit();
+		break;
+
+	case IPI_CPU_STOP:
+		irq_enter();
+		ipi_cpu_stop(cpu);
+		irq_exit();
+		break;
+
+	case IPI_CPU_BACKTRACE:
+		ipi_cpu_backtrace(cpu, regs);
+		break;
+
+	default:
+		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
+		       cpu, ipinr);
+		break;
+	}
+	set_irq_regs(old_regs);
+}
+
+void smp_send_reschedule(int cpu)
+{
+	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+	unsigned int cpu;
+	for_each_cpu(cpu, mask)
+		platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
+void smp_send_stop(void)
+{
+	unsigned long timeout;
+	struct cpumask mask;
+
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	if (!cpumask_empty(&mask))
+		smp_cross_call(&mask, IPI_CPU_STOP);
+
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while (num_online_cpus() > 1 && timeout--)
+		udelay(1);
+
+	if (num_online_cpus() > 1)
+		pr_warning("SMP: failed to stop secondary CPUs\n");
+
+	smp_kill_cpus(&mask);
+}
+
+/*
+ * not supported here
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;
+}
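
All of the IPI traffic above funnels through the platform-registered
smp_cross_call() hook. As a minimal sketch (assuming a GIC-based platform;
my_plat_smp_prepare_cpus() is an invented name, while gic_raise_softirq()
and set_smp_cross_call() are real symbols in this tree), the registration
would look roughly like this:

#include <linux/init.h>
#include <linux/cpumask.h>
#include <asm/hardware/gic.h>
#include <asm/smp.h>

/* Hypothetical platform glue: every IPI_* sent through smp_cross_call()
 * becomes a GIC software-generated interrupt on the target CPUs.
 */
void __init my_plat_smp_prepare_cpus(unsigned int max_cpus)
{
	set_smp_cross_call(gic_raise_softirq);
}
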
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp_scu.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp_scu.c
new file mode 100644
index 0000000..8f5dd79
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp_scu.c
@@ -0,0 +1,87 @@
+/*
+ *  linux/arch/arm/kernel/smp_scu.c
+ *
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/smp_scu.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+
+#define SCU_CTRL		0x00
+#define SCU_CONFIG		0x04
+#define SCU_CPU_STATUS		0x08
+#define SCU_INVALIDATE		0x0c
+#define SCU_FPGA_REVISION	0x10
+
+#ifdef CONFIG_SMP
+/*
+ * Get the number of CPU cores from the SCU configuration
+ */
+unsigned int __init scu_get_core_count(void __iomem *scu_base)
+{
+	unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+	return (ncores & 0x03) + 1;
+}
+
+/*
+ * Enable the SCU
+ */
+void scu_enable(void __iomem *scu_base)
+{
+	u32 scu_ctrl;
+
+#ifdef CONFIG_ARM_ERRATA_764369
+	/* Cortex-A9 only */
+	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+		scu_ctrl = __raw_readl(scu_base + 0x30);
+		if (!(scu_ctrl & 1))
+			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+	}
+#endif
+
+	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+	/* already enabled? */
+	if (scu_ctrl & 1)
+		return;
+
+	scu_ctrl |= 1;
+	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+
+	/*
+	 * Ensure that the data accessed by CPU0 before the SCU was
+	 * initialised is visible to the other CPUs.
+	 */
+	flush_cache_all();
+}
+#endif
+
+/*
+ * Set the executing CPU's power mode as defined.  This will be in
+ * preparation for it executing a WFI instruction.
+ *
+ * This function must be called with preemption disabled, and as it
+ * has the side effect of disabling coherency, caches must have been
+ * flushed.  Interrupts must also have been disabled.
+ */
+int scu_power_mode(void __iomem *scu_base, unsigned int mode)
+{
+	unsigned int val;
+	int cpu = smp_processor_id();
+
+	if (mode > 3 || mode == 1 || cpu > 3)
+		return -EINVAL;
+
+	val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+	val |= mode;
+	__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+
+	return 0;
+}
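
For context, a Cortex-A9 class platform maps the SCU once at boot, sizes
the possible-CPU map from scu_get_core_count(), and calls scu_enable()
before any secondary core is released. A minimal sketch, with the
physical base address invented:

#include <linux/init.h>
#include <linux/io.h>
#include <asm/smp_scu.h>

#define MY_SCU_PHYS	0x1e000000	/* hypothetical SCU base address */

static void __iomem *scu_base;

/* Count the cores early so cpu_possible_mask can be initialised... */
static unsigned int __init my_get_core_count(void)
{
	scu_base = ioremap(MY_SCU_PHYS, 0x100);
	return scu_base ? scu_get_core_count(scu_base) : 1;
}

/* ...and switch coherency on before boot_secondary() releases a core. */
static void __init my_smp_prepare_cpus(unsigned int max_cpus)
{
	if (scu_base)
		scu_enable(scu_base);
}
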
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp_tlb.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp_tlb.c
new file mode 100644
index 0000000..02c5d2c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp_tlb.c
@@ -0,0 +1,129 @@
+/*
+ *  linux/arch/arm/kernel/smp_tlb.c
+ *
+ *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/preempt.h>
+#include <linux/smp.h>
+
+#include <asm/smp_plat.h>
+#include <asm/tlbflush.h>
+
+/**********************************************************************/
+
+/*
+ * TLB operations
+ */
+struct tlb_args {
+	struct vm_area_struct *ta_vma;
+	unsigned long ta_start;
+	unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_all(void *ignored)
+{
+	local_flush_tlb_all();
+}
+
+static inline void ipi_flush_tlb_mm(void *arg)
+{
+	struct mm_struct *mm = (struct mm_struct *)arg;
+
+	local_flush_tlb_mm(mm);
+}
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_kernel_page(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_kernel_page(ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	else
+		local_flush_tlb_all();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	if (tlb_ops_need_broadcast())
+		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
+	else
+		local_flush_tlb_mm(mm);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = uaddr;
+		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
+					&ta, 1);
+	} else
+		local_flush_tlb_page(vma, uaddr);
+}
+
+void flush_tlb_kernel_page(unsigned long kaddr)
+{
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = kaddr;
+		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	} else
+		local_flush_tlb_kernel_page(kaddr);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+                     unsigned long start, unsigned long end)
+{
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
+					&ta, 1);
+	} else
+		local_flush_tlb_range(vma, start, end);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	} else
+		local_flush_tlb_kernel_range(start, end);
+}
+
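
Whether the flush_tlb_*() entry points above take the IPI path is decided
entirely by tlb_ops_need_broadcast(). Paraphrasing the helper from
asm/smp_plat.h in this tree (the my_ prefix marks this as a sketch, not
the canonical definition):

#include <asm/cputype.h>
#include <asm/smp_plat.h>

/* TLB maintenance must be broadcast in software (via the IPIs above)
 * unless ID_MMFR3 bits [15:12] report that the CPU broadcasts such
 * operations in hardware (a value of 2 or more).
 */
static inline int my_tlb_ops_need_broadcast(void)
{
	if (!is_smp())
		return 0;
	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}
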
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp_twd.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp_twd.c
new file mode 100644
index 0000000..fef42b2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/smp_twd.c
@@ -0,0 +1,347 @@
+/*
+ *  linux/arch/arm/kernel/smp_twd.c
+ *
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+#include <linux/jiffies.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <asm/smp_twd.h>
+#include <asm/localtimer.h>
+#include <asm/hardware/gic.h>
+
+/* set up by the platform code */
+static void __iomem *twd_base;
+
+static struct clk *twd_clk;
+static unsigned long twd_timer_rate;
+
+static struct clock_event_device __percpu **twd_evt;
+static int twd_ppi;
+
+static void twd_set_mode(enum clock_event_mode mode,
+			struct clock_event_device *clk)
+{
+	unsigned long ctrl;
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* timer load already set up */
+		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
+			| TWD_TIMER_CONTROL_PERIODIC;
+		__raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* period set, and timer enabled in 'next_event' hook */
+		ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	default:
+		ctrl = 0;
+	}
+
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+}
+
+static int twd_set_next_event(unsigned long evt,
+			struct clock_event_device *unused)
+{
+	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+
+	ctrl |= TWD_TIMER_CONTROL_ENABLE;
+
+	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
+	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+
+	return 0;
+}
+
+/*
+ * twd_timer_ack: checks for a local timer interrupt.
+ *
+ * If a local timer interrupt has occurred, acknowledge and return 1.
+ * Otherwise, return 0.
+ */
+static int twd_timer_ack(void)
+{
+	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
+		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void twd_timer_stop(struct clock_event_device *clk)
+{
+	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	disable_percpu_irq(clk->irq);
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *data)
+{
+	twd_timer_rate = clk_get_rate(twd_clk);
+
+	clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
+}
+
+static int twd_cpufreq_transition(struct notifier_block *nb,
+	unsigned long state, void *data)
+{
+	struct cpufreq_freqs *freqs = data;
+
+	/*
+	 * The twd clock events must be reprogrammed to account for the new
+	 * frequency.  The timer is local to a cpu, so cross-call to the
+	 * changing cpu.
+	 */
+	if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
+		smp_call_function_single(freqs->cpu, twd_update_frequency,
+			NULL, 1);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block twd_cpufreq_nb = {
+	.notifier_call = twd_cpufreq_transition,
+};
+
+static int twd_cpufreq_init(void)
+{
+	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
+		return cpufreq_register_notifier(&twd_cpufreq_nb,
+			CPUFREQ_TRANSITION_NOTIFIER);
+
+	return 0;
+}
+core_initcall(twd_cpufreq_init);
+
+#endif
+
+static void __cpuinit twd_calibrate_rate(void)
+{
+	unsigned long count;
+	u64 waitjiffies;
+
+	/*
+	 * If this is the first time round, we need to work out how fast
+	 * the timer ticks
+	 */
+	if (twd_timer_rate == 0) {
+		printk(KERN_INFO "Calibrating local timer... ");
+
+		/* Wait for a tick to start */
+		waitjiffies = get_jiffies_64() + 1;
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		/* OK, now the tick has started, let's get the timer going */
+		waitjiffies += 5;
+
+				 /* enable, no interrupt or reload */
+		__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+
+				 /* maximum value */
+		__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+
+		while (get_jiffies_64() < waitjiffies)
+			udelay(10);
+
+		count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+
+		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
+
+		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
+			(twd_timer_rate / 10000) % 100);
+	}
+}
+
+static irqreturn_t twd_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+
+	if (twd_timer_ack()) {
+		evt->event_handler(evt);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static struct clk *twd_get_clock(void)
+{
+	struct clk *clk;
+	int err;
+
+	clk = clk_get_sys("smp_twd", NULL);
+	if (IS_ERR(clk)) {
+		pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
+		return clk;
+	}
+
+	err = clk_prepare(clk);
+	if (err) {
+		pr_err("smp_twd: clock failed to prepare: %d\n", err);
+		clk_put(clk);
+		return ERR_PTR(err);
+	}
+
+	err = clk_enable(clk);
+	if (err) {
+		pr_err("smp_twd: clock failed to enable: %d\n", err);
+		clk_unprepare(clk);
+		clk_put(clk);
+		return ERR_PTR(err);
+	}
+
+	return clk;
+}
+
+/*
+ * Setup the local clock events for a CPU.
+ */
+static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
+{
+	struct clock_event_device **this_cpu_clk;
+
+	if (!twd_clk)
+		twd_clk = twd_get_clock();
+
+	if (!IS_ERR_OR_NULL(twd_clk))
+		twd_timer_rate = clk_get_rate(twd_clk);
+	else
+		twd_calibrate_rate();
+
+	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+
+	clk->name = "local_timer";
+	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+			CLOCK_EVT_FEAT_C3STOP;
+	clk->rating = 350;
+	clk->set_mode = twd_set_mode;
+	clk->set_next_event = twd_set_next_event;
+	clk->irq = twd_ppi;
+
+	this_cpu_clk = __this_cpu_ptr(twd_evt);
+	*this_cpu_clk = clk;
+
+	clockevents_config_and_register(clk, twd_timer_rate,
+					0xf, 0xffffffff);
+	enable_percpu_irq(clk->irq, 0);
+
+	return 0;
+}
+
+static struct local_timer_ops twd_lt_ops __cpuinitdata = {
+	.setup	= twd_timer_setup,
+	.stop	= twd_timer_stop,
+};
+
+static int __init twd_local_timer_common_register(void)
+{
+	int err;
+
+	twd_evt = alloc_percpu(struct clock_event_device *);
+	if (!twd_evt) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
+	if (err) {
+		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
+		goto out_free;
+	}
+
+	err = local_timer_register(&twd_lt_ops);
+	if (err)
+		goto out_irq;
+
+	return 0;
+
+out_irq:
+	free_percpu_irq(twd_ppi, twd_evt);
+out_free:
+	iounmap(twd_base);
+	twd_base = NULL;
+	free_percpu(twd_evt);
+
+	return err;
+}
+
+int __init twd_local_timer_register(struct twd_local_timer *tlt)
+{
+	if (twd_base || twd_evt)
+		return -EBUSY;
+
+	twd_ppi	= tlt->res[1].start;
+
+	twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
+	if (!twd_base)
+		return -ENOMEM;
+
+	return twd_local_timer_common_register();
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id twd_of_match[] __initconst = {
+	{ .compatible = "arm,cortex-a9-twd-timer",	},
+	{ .compatible = "arm,cortex-a5-twd-timer",	},
+	{ .compatible = "arm,arm11mp-twd-timer",	},
+	{ },
+};
+
+void __init twd_local_timer_of_register(void)
+{
+	struct device_node *np;
+	int err;
+
+	np = of_find_matching_node(NULL, twd_of_match);
+	if (!np) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	twd_ppi = irq_of_parse_and_map(np, 0);
+	if (!twd_ppi) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	twd_base = of_iomap(np, 0);
+	if (!twd_base) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = twd_local_timer_common_register();
+
+out:
+	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
+}
+#endif
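
On platforms without device tree support the same registration is done
statically. A sketch using the DEFINE_TWD_LOCAL_TIMER() helper from
asm/smp_twd.h; the base address and PPI number below are invented
examples (the TWD conventionally sits at SCU base + 0x600 and raises
PPI 29 on Cortex-A9):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/smp_twd.h>

/* Hypothetical placement: adjust the base and IRQ for the actual SoC. */
static DEFINE_TWD_LOCAL_TIMER(my_twd_local_timer, 0x1e000600, 29);

static void __init my_plat_timer_init(void)
{
	int err = twd_local_timer_register(&my_twd_local_timer);

	if (err)
		pr_err("twd_local_timer_register failed %d\n", err);
}
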
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/stacktrace.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/stacktrace.c
new file mode 100644
index 0000000..6582c4a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/stacktrace.c
@@ -0,0 +1,139 @@
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+
+#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+/*
+ * Unwind the current stack frame and store the new register values in the
+ * structure passed as argument. Unwinding is equivalent to a function return,
+ * hence the new PC value rather than LR should be used for backtrace.
+ *
+ * With framepointer enabled, a simple function prologue looks like this:
+ *	mov	ip, sp
+ *	stmdb	sp!, {fp, ip, lr, pc}
+ *	sub	fp, ip, #4
+ *
+ * A simple function epilogue looks like this:
+ *	ldm	sp, {fp, sp, pc}
+ *
+ * Note that with framepointer enabled, even the leaf functions have the same
+ * prologue and epilogue, therefore we can ignore the LR value in this case.
+ */
+int notrace unwind_frame(struct stackframe *frame)
+{
+	unsigned long high, low;
+	unsigned long fp = frame->fp;
+
+	/* only go to a higher address on the stack */
+	low = frame->sp;
+	high = ALIGN(low, THREAD_SIZE);
+
+	/* check current frame pointer is within bounds */
+	if (fp < low + 12 || fp > high - 4)
+		return -EINVAL;
+
+	/* restore the registers from the stack frame */
+	frame->fp = *(unsigned long *)(fp - 12);
+	frame->sp = *(unsigned long *)(fp - 8);
+	frame->pc = *(unsigned long *)(fp - 4);
+
+	return 0;
+}
+#endif
+
+void notrace walk_stackframe(struct stackframe *frame,
+		     int (*fn)(struct stackframe *, void *), void *data)
+{
+	while (1) {
+		int ret;
+
+		if (fn(frame, data))
+			break;
+		ret = unwind_frame(frame);
+		if (ret < 0)
+			break;
+	}
+}
+EXPORT_SYMBOL(walk_stackframe);
+
+#ifdef CONFIG_STACKTRACE
+struct stack_trace_data {
+	struct stack_trace *trace;
+	unsigned int no_sched_functions;
+	unsigned int skip;
+};
+
+static int save_trace(struct stackframe *frame, void *d)
+{
+	struct stack_trace_data *data = d;
+	struct stack_trace *trace = data->trace;
+	unsigned long addr = frame->pc;
+
+	if (data->no_sched_functions && in_sched_functions(addr))
+		return 0;
+	if (data->skip) {
+		data->skip--;
+		return 0;
+	}
+
+	trace->entries[trace->nr_entries++] = addr;
+
+	return trace->nr_entries >= trace->max_entries;
+}
+
+/* This must be noinline so that our skip calculation works correctly */
+static noinline void __save_stack_trace(struct task_struct *tsk,
+	struct stack_trace *trace, unsigned int nosched)
+{
+	struct stack_trace_data data;
+	struct stackframe frame;
+
+	data.trace = trace;
+	data.skip = trace->skip;
+	data.no_sched_functions = nosched;
+
+	if (tsk != current) {
+#ifdef CONFIG_SMP
+		/*
+		 * What guarantees do we have here that 'tsk' is not
+		 * running on another CPU?  For now, ignore it as we
+		 * can't guarantee we won't explode.
+		 */
+		if (trace->nr_entries < trace->max_entries)
+			trace->entries[trace->nr_entries++] = ULONG_MAX;
+		return;
+#else
+		frame.fp = thread_saved_fp(tsk);
+		frame.sp = thread_saved_sp(tsk);
+		frame.lr = 0;		/* recovered from the stack */
+		frame.pc = thread_saved_pc(tsk);
+#endif
+	} else {
+		register unsigned long current_sp asm ("sp");
+
+		/* We don't want this function nor the caller */
+		data.skip += 2;
+		frame.fp = (unsigned long)__builtin_frame_address(0);
+		frame.sp = current_sp;
+		frame.lr = (unsigned long)__builtin_return_address(0);
+		frame.pc = (unsigned long)__save_stack_trace;
+	}
+
+	walk_stackframe(&frame, save_trace, &data);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	__save_stack_trace(tsk, trace, 1);
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	__save_stack_trace(current, trace, 0);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+#endif
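
A minimal sketch of consuming this API from elsewhere in the kernel:
capture up to 16 return addresses from the current task and print them
symbolically (my_dump_current_stack() is an illustrative name only):

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void my_dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};
	unsigned int i;

	save_stack_trace(&trace);		/* fills entries[] */
	for (i = 0; i < trace.nr_entries; i++)
		printk(KERN_INFO "[<%08lx>] %pS\n",
		       entries[i], (void *)entries[i]);
}
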
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/suspend.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/suspend.c
new file mode 100644
index 0000000..1794cc3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/suspend.c
@@ -0,0 +1,60 @@
+#include <linux/init.h>
+
+#include <asm/idmap.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/memory.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern void cpu_resume_mmu(void);
+
+/*
+ * This is called by __cpu_suspend() to save the state, and do whatever
+ * flushing is required to ensure that when the CPU goes to sleep we have
+ * the necessary data available when the caches are not searched.
+ */
+void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
+{
+	*save_ptr = virt_to_phys(ptr);
+
+	/* This must correspond to the LDM in cpu_resume() assembly */
+	*ptr++ = virt_to_phys(idmap_pgd);
+	*ptr++ = sp;
+	*ptr++ = virt_to_phys(cpu_do_resume);
+
+	cpu_do_suspend(ptr);
+
+	flush_cache_all();
+	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
+	outer_clean_range(virt_to_phys(save_ptr),
+			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
+}
+
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	struct mm_struct *mm = current->active_mm;
+	int ret;
+
+	if (!idmap_pgd)
+		return -EINVAL;
+
+	/*
+	 * Provide a temporary page table with an identity mapping for
+	 * the MMU-enable code, required for resuming.  On successful
+	 * resume (indicated by a zero return code), we need to switch
+	 * back to the correct page tables.
+	 */
+	ret = __cpu_suspend(arg, fn);
+	if (ret == 0) {
+		cpu_switch_mm(mm->pgd, mm);
+		local_flush_tlb_all();
+	}
+
+	return ret;
+}
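
A platform suspend path typically wraps its point of no return in
cpu_suspend(). A minimal sketch, assuming a platform where WFI is the
deepest state available (my_finish_suspend() and my_enter_lowpower() are
invented names; if the finisher returns at all, the suspend is treated
as aborted and cpu_suspend() returns nonzero):

#include <asm/proc-fns.h>
#include <asm/suspend.h>

static int my_finish_suspend(unsigned long arg)
{
	/* State is saved and caches are clean at this point; a real
	 * platform would cut power here instead of just idling.
	 */
	cpu_do_idle();
	return 1;	/* power stayed on: tell cpu_suspend() we aborted */
}

static int my_enter_lowpower(void)
{
	return cpu_suspend(0, my_finish_suspend);
}
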
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/swp_emulate.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/swp_emulate.c
new file mode 100644
index 0000000..ab1017b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/swp_emulate.c
@@ -0,0 +1,285 @@
+/*
+ *  linux/arch/arm/kernel/swp_emulate.c
+ *
+ *  Copyright (C) 2009 ARM Limited
+ *  __user_* functions adapted from include/asm/uaccess.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Implements emulation of the SWP/SWPB instructions using load-exclusive and
+ *  store-exclusive for processors that have them disabled (or future ones that
+ *  might not implement them).
+ *
+ *  Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
+ *  Where: Rt  = destination
+ *	   Rt2 = source
+ *	   Rn  = address
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/perf_event.h>
+
+#include <asm/opcodes.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+
+/*
+ * Error-checking SWP macros implemented using ldrex{b}/strex{b}
+ */
+#define __user_swpX_asm(data, addr, res, temp, B)		\
+	__asm__ __volatile__(					\
+	"	mov		%2, %1\n"			\
+	"0:	ldrex"B"	%1, [%3]\n"			\
+	"1:	strex"B"	%0, %2, [%3]\n"			\
+	"	cmp		%0, #0\n"			\
+	"	movne		%0, %4\n"			\
+	"2:\n"							\
+	"	.section	 .fixup,\"ax\"\n"		\
+	"	.align		2\n"				\
+	"3:	mov		%0, %5\n"			\
+	"	b		2b\n"				\
+	"	.previous\n"					\
+	"	.section	 __ex_table,\"a\"\n"		\
+	"	.align		3\n"				\
+	"	.long		0b, 3b\n"			\
+	"	.long		1b, 3b\n"			\
+	"	.previous"					\
+	: "=&r" (res), "+r" (data), "=&r" (temp)		\
+	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)		\
+	: "cc", "memory")
+
+#define __user_swp_asm(data, addr, res, temp) \
+	__user_swpX_asm(data, addr, res, temp, "")
+#define __user_swpb_asm(data, addr, res, temp) \
+	__user_swpX_asm(data, addr, res, temp, "b")
+
+/*
+ * Macros/defines for extracting register numbers from instruction.
+ */
+#define EXTRACT_REG_NUM(instruction, offset) \
+	(((instruction) & (0xf << (offset))) >> (offset))
+#define RN_OFFSET  16
+#define RT_OFFSET  12
+#define RT2_OFFSET  0
+/*
+ * Bit 22 of the instruction encoding distinguishes between
+ * the SWP and SWPB variants (bit set means SWPB).
+ */
+#define TYPE_SWPB (1 << 22)
+
+static unsigned long swpcounter;
+static unsigned long swpbcounter;
+static unsigned long abtcounter;
+static pid_t         previous_pid;
+
+#ifdef CONFIG_PROC_FS
+static int proc_read_status(char *page, char **start, off_t off, int count,
+			    int *eof, void *data)
+{
+	char *p = page;
+	int len;
+
+	p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter);
+	p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter);
+	p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter);
+	if (previous_pid != 0)
+		p += sprintf(p, "Last process:\t\t%d\n", previous_pid);
+
+	len = (p - page) - off;
+	if (len < 0)
+		len = 0;
+
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+
+	return len;
+}
+#endif
+
+/*
+ * Set up process info to signal segmentation fault - called on access error.
+ */
+static void set_segfault(struct pt_regs *regs, unsigned long addr)
+{
+	siginfo_t info;
+
+	down_read(&current->mm->mmap_sem);
+	if (find_vma(current->mm, addr) == NULL)
+		info.si_code = SEGV_MAPERR;
+	else
+		info.si_code = SEGV_ACCERR;
+	up_read(&current->mm->mmap_sem);
+
+	info.si_signo = SIGSEGV;
+	info.si_errno = 0;
+	info.si_addr  = (void *) instruction_pointer(regs);
+
+	pr_debug("SWP{B} emulation: access caused memory abort!\n");
+	arm_notify_die("Illegal memory access", regs, &info, 0, 0);
+
+	abtcounter++;
+}
+
+static int emulate_swpX(unsigned int address, unsigned int *data,
+			unsigned int type)
+{
+	unsigned int res = 0;
+
+	if ((type != TYPE_SWPB) && (address & 0x3)) {
+		/* SWP to unaligned address not permitted */
+		pr_debug("SWP instruction on unaligned pointer!\n");
+		return -EFAULT;
+	}
+
+	while (1) {
+		unsigned long temp;
+
+		/*
+		 * A barrier is required between accessing a protected resource
+		 * and releasing a lock for it. Legacy code might not have done
+		 * this, and we cannot rule out that the code being emulated
+		 * relied on it, so always insert one.
+		 */
+		smp_mb();
+
+		if (type == TYPE_SWPB)
+			__user_swpb_asm(*data, address, res, temp);
+		else
+			__user_swp_asm(*data, address, res, temp);
+
+		if (likely(res != -EAGAIN) || signal_pending(current))
+			break;
+
+		cond_resched();
+	}
+
+	if (res == 0) {
+		/*
+		 * A barrier is also required between acquiring a lock for a
+		 * protected resource and accessing the resource. Inserted for
+		 * the same reason as above.
+		 */
+		smp_mb();
+
+		if (type == TYPE_SWPB)
+			swpbcounter++;
+		else
+			swpcounter++;
+	}
+
+	return res;
+}
+
+/*
+ * swp_handler logs the id of calling process, dissects the instruction, sanity
+ * checks the memory location, calls emulate_swpX for the actual operation and
+ * deals with fixup/error handling before returning
+ */
+static int swp_handler(struct pt_regs *regs, unsigned int instr)
+{
+	unsigned int address, destreg, data, type;
+	unsigned int res = 0;
+
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
+
+	res = arm_check_condition(instr, regs->ARM_cpsr);
+	switch (res) {
+	case ARM_OPCODE_CONDTEST_PASS:
+		break;
+	case ARM_OPCODE_CONDTEST_FAIL:
+		/* Condition failed - return to next instruction */
+		regs->ARM_pc += 4;
+		return 0;
+	case ARM_OPCODE_CONDTEST_UNCOND:
+		/* If unconditional encoding - not a SWP, undef */
+		return -EFAULT;
+	default:
+		return -EINVAL;
+	}
+
+	if (current->pid != previous_pid) {
+		pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
+			 current->comm, (unsigned long)current->pid);
+		previous_pid = current->pid;
+	}
+
+	address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
+	data	= regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
+	destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
+
+	type = instr & TYPE_SWPB;
+
+	pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
+		 EXTRACT_REG_NUM(instr, RN_OFFSET), address,
+		 destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
+
+	/* Check that the access is within a reasonable range for both SWP and SWPB */
+	if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+		pr_debug("SWP{B} emulation: access to %p not allowed!\n",
+			 (void *)address);
+		res = -EFAULT;
+	} else {
+		res = emulate_swpX(address, &data, type);
+	}
+
+	if (res == 0) {
+		/*
+		 * On successful emulation, revert the adjustment to the PC
+		 * made in kernel/traps.c in order to resume execution at the
+		 * instruction following the SWP{B}.
+		 */
+		regs->ARM_pc += 4;
+		regs->uregs[destreg] = data;
+	} else if (res == -EFAULT) {
+		/*
+		 * Memory errors do not mean emulation failed.
+		 * Set up signal info to return SEGV, then return OK
+		 */
+		set_segfault(regs, address);
+	}
+
+	return 0;
+}
+
+/*
+ * Only emulate SWP/SWPB executed in ARM state/User mode.
+ * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
+ */
+static struct undef_hook swp_hook = {
+	.instr_mask = 0x0fb00ff0,
+	.instr_val  = 0x01000090,
+	.cpsr_mask  = MODE_MASK | PSR_T_BIT | PSR_J_BIT,
+	.cpsr_val   = USR_MODE,
+	.fn	    = swp_handler
+};
+
+/*
+ * Register handler and create status file in /proc/cpu
+ * Invoked as late_initcall, since not needed before init spawned.
+ */
+static int __init swp_emulation_init(void)
+{
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *res;
+
+	res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL);
+
+	if (!res)
+		return -ENOMEM;
+
+	res->read_proc = proc_read_status;
+#endif /* CONFIG_PROC_FS */
+
+	printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");
+	register_undef_hook(&swp_hook);
+
+	return 0;
+}
+
+late_initcall(swp_emulation_init);
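
To see the hook in action from user space, a program only has to execute
a SWP in ARM state; the counters then show up in /proc/cpu/swp_emulation.
A hypothetical probe (build with -marm, since SWP{B} has no Thumb
encoding):

/* User-space test, not kernel code: atomically swap *addr with newval
 * via the deprecated SWP instruction, which traps into swp_handler()
 * on cores where SWP is disabled.
 */
static unsigned int my_swp(unsigned int newval, unsigned int *addr)
{
	unsigned int old;

	__asm__ __volatile__("swp %0, %1, [%2]"
			     : "=&r" (old)
			     : "r" (newval), "r" (addr)
			     : "memory");
	return old;
}
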
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/sys_arm.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/sys_arm.c
new file mode 100644
index 0000000..0e766b1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/sys_arm.c
@@ -0,0 +1,137 @@
+/*
+ *  linux/arch/arm/kernel/sys_arm.c
+ *
+ *  Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
+ *  Copyright (C) 1995, 1996 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This file contains various random system calls that
+ *  have a non-standard calling sequence on the Linux/arm
+ *  platform.
+ */
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/ipc.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+/* Fork a new task - this creates a new program thread.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage int sys_fork(struct pt_regs *regs)
+{
+#ifdef CONFIG_MMU
+	return do_fork(SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
+#else
+	/* cannot be supported in nommu mode */
+	return(-EINVAL);
+#endif
+}
+
+/* Clone a task - this clones the calling program thread.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+			 int __user *parent_tidptr, int tls_val,
+			 int __user *child_tidptr, struct pt_regs *regs)
+{
+#ifdef CONFIG_STACK_SIZE
+	clone_flags |= CLONE_8K_STACK;
+#endif
+	if (!newsp)
+		newsp = regs->ARM_sp;
+
+	return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
+}
+
+asmlinkage int sys_vfork(struct pt_regs *regs)
+{
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
+}
+
+/* sys_execve() executes a new program.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage int sys_execve(const char __user *filenamei,
+			  const char __user *const __user *argv,
+			  const char __user *const __user *envp, struct pt_regs *regs)
+{
+	int error;
+	char * filename;
+
+	filename = getname(filenamei);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	error = do_execve(filename, argv, envp, regs);
+	putname(filename);
+out:
+	return error;
+}
+
+int kernel_execve(const char *filename,
+		  const char *const argv[],
+		  const char *const envp[])
+{
+	struct pt_regs regs;
+	int ret;
+
+	memset(&regs, 0, sizeof(struct pt_regs));
+	ret = do_execve(filename,
+			(const char __user *const __user *)argv,
+			(const char __user *const __user *)envp, &regs);
+	if (ret < 0)
+		goto out;
+
+	/*
+	 * Save argc to the register structure for userspace.
+	 */
+	regs.ARM_r0 = ret;
+
+	/*
+	 * We were successful.  We won't be returning to our caller, but
+	 * instead to user space by manipulating the kernel stack.
+	 */
+	asm(	"add	r0, %0, %1\n\t"
+		"mov	r1, %2\n\t"
+		"mov	r2, %3\n\t"
+		"bl	memmove\n\t"	/* copy regs to top of stack */
+		"mov	r8, #0\n\t"	/* not a syscall */
+		"mov	r9, %0\n\t"	/* thread structure */
+		"mov	sp, r0\n\t"	/* reposition stack pointer */
+		"b	ret_to_user"
+		:
+		: "r" (current_thread_info()),
+		  "Ir" (THREAD_START_SP - sizeof(regs)),
+		  "r" (&regs),
+		  "Ir" (sizeof(regs))
+		: "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
+
+ out:
+	return ret;
+}
+EXPORT_SYMBOL(kernel_execve);
+
+/*
+ * Since loff_t is a 64-bit type, we avoid a lot of ABI hassle
+ * by using a different argument ordering.
+ */
+asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
+				     loff_t offset, loff_t len)
+{
+	return sys_fadvise64_64(fd, offset, len, advice);
+}
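
The argument shuffle exists because EABI passes 64-bit values in even/odd
register pairs: placing advice second keeps offset in (r2,r3) and len in
(r4,r5). A hypothetical user-side stub showing the ordering (mirroring
what a libc would do; my_posix_fadvise() is an invented name):

#include <unistd.h>
#include <sys/syscall.h>

static int my_posix_fadvise(int fd, long long offset, long long len,
			    int advice)
{
	/* Note the kernel-side order: fd, advice, offset, len. */
	return syscall(__NR_arm_fadvise64_64, fd, advice, offset, len);
}
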
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/sys_oabi-compat.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/sys_oabi-compat.c
new file mode 100644
index 0000000..af0aaeb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/sys_oabi-compat.c
@@ -0,0 +1,453 @@
+/*
+ *  arch/arm/kernel/sys_oabi-compat.c
+ *
+ *  Compatibility wrappers for syscalls that are used from
+ *  old ABI user space binaries with an EABI kernel.
+ *
+ *  Author:	Nicolas Pitre
+ *  Created:	Oct 7, 2005
+ *  Copyright:	MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+/*
+ * The legacy ABI and the new ARM EABI have different rules making some
+ * syscalls incompatible especially with structure arguments.
+ * Most notably, Eabi says 64-bit members should be 64-bit aligned instead of
+ * simply word aligned.  EABI also pads structures to the size of the largest
+ * member it contains instead of the invariant 32-bit.
+ *
+ * The following syscalls are affected:
+ *
+ * sys_stat64:
+ * sys_lstat64:
+ * sys_fstat64:
+ * sys_fstatat64:
+ *
+ *   struct stat64 has different sizes and some members are shifted
+ *   Compatibility wrappers are needed for them and provided below.
+ *
+ * sys_fcntl64:
+ *
+ *   struct flock64 has different sizes and some members are shifted
+ *   A compatibility wrapper is needed and provided below.
+ *
+ * sys_statfs64:
+ * sys_fstatfs64:
+ *
+ *   struct statfs64 has extra padding with EABI growing its size from
+ *   84 to 88.  This struct is now __attribute__((packed,aligned(4)))
+ *   with a small assembly wrapper to force the sz argument to 84 if it is 88
+ *   to avoid copying the extra padding to user space that does not expect it.
+ *
+ * sys_newuname:
+ *
+ *   struct new_utsname has no padding with EABI.  No problem there.
+ *
+ * sys_epoll_ctl:
+ * sys_epoll_wait:
+ *
+ *   struct epoll_event has its second member shifted also affecting the
+ *   structure size. Compatibility wrappers are needed and provided below.
+ *
+ * sys_ipc:
+ * sys_semop:
+ * sys_semtimedop:
+ *
+ *   struct sembuf loses its padding with EABI.  Since arrays of them are
+ *   used, they have to be copied to remove the padding. Compatibility
+ *   wrappers are provided below.
+ *
+ * sys_bind:
+ * sys_connect:
+ * sys_sendmsg:
+ * sys_sendto:
+ * sys_socketcall:
+ *
+ *   struct sockaddr_un loses its padding with EABI.  Since the size of the
+ *   structure is used as a validation test in unix_mkname(), we need to
+ *   change the length argument to 110 whenever it is 112.  Compatibility
+ *   wrappers provided below.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/eventpoll.h>
+#include <linux/sem.h>
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <linux/ipc.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+struct oldabi_stat64 {
+	unsigned long long st_dev;
+	unsigned int	__pad1;
+	unsigned long	__st_ino;
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+
+	unsigned long	st_uid;
+	unsigned long	st_gid;
+
+	unsigned long long st_rdev;
+	unsigned int	__pad2;
+
+	long long	st_size;
+	unsigned long	st_blksize;
+	unsigned long long st_blocks;
+
+	unsigned long	st_atime;
+	unsigned long	st_atime_nsec;
+
+	unsigned long	st_mtime;
+	unsigned long	st_mtime_nsec;
+
+	unsigned long	st_ctime;
+	unsigned long	st_ctime_nsec;
+
+	unsigned long long st_ino;
+} __attribute__ ((packed,aligned(4)));
+
+static long cp_oldabi_stat64(struct kstat *stat,
+			     struct oldabi_stat64 __user *statbuf)
+{
+	struct oldabi_stat64 tmp;
+
+	tmp.st_dev = huge_encode_dev(stat->dev);
+	tmp.__pad1 = 0;
+	tmp.__st_ino = stat->ino;
+	tmp.st_mode = stat->mode;
+	tmp.st_nlink = stat->nlink;
+	tmp.st_uid = stat->uid;
+	tmp.st_gid = stat->gid;
+	tmp.st_rdev = huge_encode_dev(stat->rdev);
+	tmp.st_size = stat->size;
+	tmp.st_blocks = stat->blocks;
+	tmp.__pad2 = 0;
+	tmp.st_blksize = stat->blksize;
+	tmp.st_atime = stat->atime.tv_sec;
+	tmp.st_atime_nsec = stat->atime.tv_nsec;
+	tmp.st_mtime = stat->mtime.tv_sec;
+	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
+	tmp.st_ctime = stat->ctime.tv_sec;
+	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
+	tmp.st_ino = stat->ino;
+	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+}
+
+asmlinkage long sys_oabi_stat64(const char __user * filename,
+				struct oldabi_stat64 __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_stat(filename, &stat);
+	if (!error)
+		error = cp_oldabi_stat64(&stat, statbuf);
+	return error;
+}
+
+asmlinkage long sys_oabi_lstat64(const char __user * filename,
+				 struct oldabi_stat64 __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_lstat(filename, &stat);
+	if (!error)
+		error = cp_oldabi_stat64(&stat, statbuf);
+	return error;
+}
+
+asmlinkage long sys_oabi_fstat64(unsigned long fd,
+				 struct oldabi_stat64 __user * statbuf)
+{
+	struct kstat stat;
+	int error = vfs_fstat(fd, &stat);
+	if (!error)
+		error = cp_oldabi_stat64(&stat, statbuf);
+	return error;
+}
+
+asmlinkage long sys_oabi_fstatat64(int dfd,
+				   const char __user *filename,
+				   struct oldabi_stat64  __user *statbuf,
+				   int flag)
+{
+	struct kstat stat;
+	int error;
+
+	error = vfs_fstatat(dfd, filename, &stat, flag);
+	if (error)
+		return error;
+	return cp_oldabi_stat64(&stat, statbuf);
+}
+
+struct oabi_flock64 {
+	short	l_type;
+	short	l_whence;
+	loff_t	l_start;
+	loff_t	l_len;
+	pid_t	l_pid;
+} __attribute__ ((packed,aligned(4)));
+
+asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
+				 unsigned long arg)
+{
+	struct oabi_flock64 user;
+	struct flock64 kernel;
+	mm_segment_t fs = USER_DS; /* initialized to kill a warning */
+	unsigned long local_arg = arg;
+	int ret;
+
+	switch (cmd) {
+	case F_GETLK64:
+	case F_SETLK64:
+	case F_SETLKW64:
+		if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
+				   sizeof(user)))
+			return -EFAULT;
+		kernel.l_type	= user.l_type;
+		kernel.l_whence	= user.l_whence;
+		kernel.l_start	= user.l_start;
+		kernel.l_len	= user.l_len;
+		kernel.l_pid	= user.l_pid;
+		local_arg = (unsigned long)&kernel;
+		fs = get_fs();
+		set_fs(KERNEL_DS);
+	}
+
+	ret = sys_fcntl64(fd, cmd, local_arg);
+
+	switch (cmd) {
+	case F_GETLK64:
+		if (!ret) {
+			user.l_type	= kernel.l_type;
+			user.l_whence	= kernel.l_whence;
+			user.l_start	= kernel.l_start;
+			user.l_len	= kernel.l_len;
+			user.l_pid	= kernel.l_pid;
+			if (copy_to_user((struct oabi_flock64 __user *)arg,
+					 &user, sizeof(user)))
+				ret = -EFAULT;
+		}
+	case F_SETLK64:
+	case F_SETLKW64:
+		set_fs(fs);
+	}
+
+	return ret;
+}
+
+struct oabi_epoll_event {
+	__u32 events;
+	__u64 data;
+} __attribute__ ((packed,aligned(4)));
+
+asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+				   struct oabi_epoll_event __user *event)
+{
+	struct oabi_epoll_event user;
+	struct epoll_event kernel;
+	mm_segment_t fs;
+	long ret;
+
+	if (op == EPOLL_CTL_DEL)
+		return sys_epoll_ctl(epfd, op, fd, NULL);
+	if (copy_from_user(&user, event, sizeof(user)))
+		return -EFAULT;
+	kernel.events = user.events;
+	kernel.data   = user.data;
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = sys_epoll_ctl(epfd, op, fd, &kernel);
+	set_fs(fs);
+	return ret;
+}
+
+asmlinkage long sys_oabi_epoll_wait(int epfd,
+				    struct oabi_epoll_event __user *events,
+				    int maxevents, int timeout)
+{
+	struct epoll_event *kbuf;
+	mm_segment_t fs;
+	long ret, err, i;
+
+	if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+		return -EINVAL;
+	kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
+	set_fs(fs);
+	err = 0;
+	for (i = 0; i < ret; i++) {
+		__put_user_error(kbuf[i].events, &events->events, err);
+		__put_user_error(kbuf[i].data,   &events->data,   err);
+		events++;
+	}
+	kfree(kbuf);
+	return err ? -EFAULT : ret;
+}
+
+struct oabi_sembuf {
+	unsigned short	sem_num;
+	short		sem_op;
+	short		sem_flg;
+	unsigned short	__pad;
+};
+
+asmlinkage long sys_oabi_semtimedop(int semid,
+				    struct oabi_sembuf __user *tsops,
+				    unsigned nsops,
+				    const struct timespec __user *timeout)
+{
+	struct sembuf *sops;
+	struct timespec local_timeout;
+	long err;
+	int i;
+
+	if (nsops < 1 || nsops > SEMOPM)
+		return -EINVAL;
+	sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+	if (!sops)
+		return -ENOMEM;
+	err = 0;
+	for (i = 0; i < nsops; i++) {
+		__get_user_error(sops[i].sem_num, &tsops->sem_num, err);
+		__get_user_error(sops[i].sem_op,  &tsops->sem_op,  err);
+		__get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
+		tsops++;
+	}
+	if (timeout) {
+		/* copy this as well before changing domain protection */
+		err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout));
+		timeout = &local_timeout;
+	}
+	if (err) {
+		err = -EFAULT;
+	} else {
+		mm_segment_t fs = get_fs();
+		set_fs(KERNEL_DS);
+		err = sys_semtimedop(semid, sops, nsops, timeout);
+		set_fs(fs);
+	}
+	kfree(sops);
+	return err;
+}
+
+asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
+			       unsigned nsops)
+{
+	return sys_oabi_semtimedop(semid, tsops, nsops, NULL);
+}
+
+asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
+			    void __user *ptr, long fifth)
+{
+	switch (call & 0xffff) {
+	case SEMOP:
+		return  sys_oabi_semtimedop(first,
+					    (struct oabi_sembuf __user *)ptr,
+					    second, NULL);
+	case SEMTIMEDOP:
+		return  sys_oabi_semtimedop(first,
+					    (struct oabi_sembuf __user *)ptr,
+					    second,
+					    (const struct timespec __user *)fifth);
+	default:
+		return sys_ipc(call, first, second, third, ptr, fifth);
+	}
+}
+
+asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen)
+{
+	sa_family_t sa_family;
+	if (addrlen == 112 &&
+	    get_user(sa_family, &addr->sa_family) == 0 &&
+	    sa_family == AF_UNIX)
+			addrlen = 110;
+	return sys_bind(fd, addr, addrlen);
+}
+
+asmlinkage long sys_oabi_connect(int fd, struct sockaddr __user *addr, int addrlen)
+{
+	sa_family_t sa_family;
+	if (addrlen == 112 &&
+	    get_user(sa_family, &addr->sa_family) == 0 &&
+	    sa_family == AF_UNIX)
+			addrlen = 110;
+	return sys_connect(fd, addr, addrlen);
+}
+
+asmlinkage long sys_oabi_sendto(int fd, void __user *buff,
+				size_t len, unsigned flags,
+				struct sockaddr __user *addr,
+				int addrlen)
+{
+	sa_family_t sa_family;
+	if (addrlen == 112 &&
+	    get_user(sa_family, &addr->sa_family) == 0 &&
+	    sa_family == AF_UNIX)
+			addrlen = 110;
+	return sys_sendto(fd, buff, len, flags, addr, addrlen);
+}
+
+asmlinkage long sys_oabi_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
+{
+	struct sockaddr __user *addr;
+	int msg_namelen;
+	sa_family_t sa_family;
+	if (msg &&
+	    get_user(msg_namelen, &msg->msg_namelen) == 0 &&
+	    msg_namelen == 112 &&
+	    get_user(addr, &msg->msg_name) == 0 &&
+	    get_user(sa_family, &addr->sa_family) == 0 &&
+	    sa_family == AF_UNIX)
+	{
+		/*
+		 * HACK ALERT: there is a limit to how much backward bending
+		 * we should do for what is actually a transitional
+		 * compatibility layer.  This already has known flaws with
+		 * a few ioctls that we don't intend to fix.  Therefore
+		 * consider this blatant hack as another one... and take care
+		 * to run for cover.  In most cases it will "just work fine".
+		 * If it doesn't, well, tough.
+		 */
+		put_user(110, &msg->msg_namelen);
+	}
+	return sys_sendmsg(fd, msg, flags);
+}
+
+asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args)
+{
+	unsigned long r = -EFAULT, a[6];
+
+	switch (call) {
+	case SYS_BIND:
+		if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
+			r = sys_oabi_bind(a[0], (struct sockaddr __user *)a[1], a[2]);
+		break;
+	case SYS_CONNECT:
+		if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
+			r = sys_oabi_connect(a[0], (struct sockaddr __user *)a[1], a[2]);
+		break;
+	case SYS_SENDTO:
+		if (copy_from_user(a, args, 6 * sizeof(long)) == 0)
+			r = sys_oabi_sendto(a[0], (void __user *)a[1], a[2], a[3],
+					    (struct sockaddr __user *)a[4], a[5]);
+		break;
+	case SYS_SENDMSG:
+		if (copy_from_user(a, args, 3 * sizeof(long)) == 0)
+			r = sys_oabi_sendmsg(a[0], (struct msghdr __user *)a[1], a[2]);
+		break;
+	default:
+		r = sys_socketcall(call, args);
+	}
+
+	return r;
+}
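
The size and offset differences driving all of these wrappers can be
pinned down at compile time. A sketch (it would have to live in this
file, since oabi_epoll_event is local to it; my_oabi_layout_check() is
an invented name):

#include <linux/bug.h>
#include <linux/init.h>

static int __init my_oabi_layout_check(void)
{
	/* OABI: __u64 data packed at offset 4, so 12 bytes total.
	 * EABI: data is 8-byte aligned, growing the struct to 16.
	 */
	BUILD_BUG_ON(sizeof(struct oabi_epoll_event) != 12);
	BUILD_BUG_ON(sizeof(struct epoll_event) != 16);
	return 0;
}
core_initcall(my_oabi_layout_check);
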
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/tcm.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/tcm.c
new file mode 100644
index 0000000..30ae6bb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/tcm.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/ioport.h>
+#include <linux/genalloc.h>
+#include <linux/string.h> /* memcpy */
+#include <asm/cputype.h>
+#include <asm/mach/map.h>
+#include <asm/memory.h>
+#include <asm/system_info.h>
+#include "tcm.h"
+
+static struct gen_pool *tcm_pool;
+static bool dtcm_present;
+static bool itcm_present;
+
+/* TCM section definitions from the linker */
+extern char __itcm_start, __sitcm_text, __eitcm_text;
+extern char __dtcm_start, __sdtcm_data, __edtcm_data;
+
+/* These will be increased as we run */
+u32 dtcm_end = DTCM_OFFSET;
+u32 itcm_end = ITCM_OFFSET;
+
+/*
+ * TCM memory resources
+ */
+static struct resource dtcm_res = {
+	.name = "DTCM RAM",
+	.start = DTCM_OFFSET,
+	.end = DTCM_OFFSET,
+	.flags = IORESOURCE_MEM
+};
+
+static struct resource itcm_res = {
+	.name = "ITCM RAM",
+	.start = ITCM_OFFSET,
+	.end = ITCM_OFFSET,
+	.flags = IORESOURCE_MEM
+};
+
+static struct map_desc dtcm_iomap[] __initdata = {
+	{
+		.virtual	= DTCM_OFFSET,
+		.pfn		= __phys_to_pfn(DTCM_OFFSET),
+		.length		= 0,
+		.type		= MT_MEMORY_DTCM
+	}
+};
+
+static struct map_desc itcm_iomap[] __initdata = {
+	{
+		.virtual	= ITCM_OFFSET,
+		.pfn		= __phys_to_pfn(ITCM_OFFSET),
+		.length		= 0,
+		.type		= MT_MEMORY_ITCM
+	}
+};
+
+/*
+ * Allocate a chunk of TCM memory
+ */
+void *tcm_alloc(size_t len)
+{
+	unsigned long vaddr;
+
+	if (!tcm_pool)
+		return NULL;
+
+	vaddr = gen_pool_alloc(tcm_pool, len);
+	if (!vaddr)
+		return NULL;
+
+	return (void *) vaddr;
+}
+EXPORT_SYMBOL(tcm_alloc);
+
+/*
+ * Free a chunk of TCM memory
+ */
+void tcm_free(void *addr, size_t len)
+{
+	gen_pool_free(tcm_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(tcm_free);
+
+bool tcm_dtcm_present(void)
+{
+	return dtcm_present;
+}
+EXPORT_SYMBOL(tcm_dtcm_present);
+
+bool tcm_itcm_present(void)
+{
+	return itcm_present;
+}
+EXPORT_SYMBOL(tcm_itcm_present);
+
+static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
+				  u32 *offset)
+{
+	const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128,
+				    256, 512, 1024, -1, -1, -1, -1 };
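+	/*
+	 * tcm_sizes[] decodes the 4-bit size field extracted below from
+	 * the TCM region register: values are in KiB, -1 marks reserved
+	 * encodings.
+	 */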
+	u32 tcm_region;
+	int tcm_size;
+
+	/*
+	 * If there is more than one TCM bank of this type,
+	 * select the TCM bank to operate on in the TCM selection
+	 * register.
+	 */
+	if (banks > 1)
+		asm("mcr	p15, 0, %0, c9, c2, 0"
+		    : /* No output operands */
+		    : "r" (bank));
+
+	/* Read the TCM region register (c9, c1, 0 for DTCM; c9, c1, 1 for ITCM) */
+	if (!type)
+		asm("mrc	p15, 0, %0, c9, c1, 0"
+		    : "=r" (tcm_region));
+	else
+		asm("mrc	p15, 0, %0, c9, c1, 1"
+		    : "=r" (tcm_region));
+
+	tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f];
+	if (tcm_size < 0) {
+		pr_err("CPU: %sTCM%d of unknown size\n",
+		       type ? "I" : "D", bank);
+		return -EINVAL;
+	} else if (tcm_size > 32) {
+		pr_err("CPU: %sTCM%d larger than 32k found\n",
+		       type ? "I" : "D", bank);
+		return -EINVAL;
+	} else {
+		pr_info("CPU: found %sTCM%d %dk @ %08x, %senabled\n",
+			type ? "I" : "D",
+			bank,
+			tcm_size,
+			(tcm_region & 0xfffff000U),
+			(tcm_region & 1) ? "" : "not ");
+	}
+
+	/* Not much fun you can do with a size 0 bank */
+	if (tcm_size == 0)
+		return 0;
+
+	/* Force move the TCM bank to where we want it, enable */
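+	/*
+	 * Bits [11:1] (including the size field) are preserved; the new
+	 * base address comes from *offset and bit 0 enables the bank.
+	 */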
+	tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1;
+
+	if (!type)
+		asm("mcr	p15, 0, %0, c9, c1, 0"
+		    : /* No output operands */
+		    : "r" (tcm_region));
+	else
+		asm("mcr	p15, 0, %0, c9, c1, 1"
+		    : /* No output operands */
+		    : "r" (tcm_region));
+
+	/* Increase offset */
+	*offset += (tcm_size << 10);
+
+	pr_info("CPU: moved %sTCM%d %dk to %08x, enabled\n",
+		type ? "I" : "D",
+		bank,
+		tcm_size,
+		(tcm_region & 0xfffff000U));
+	return 0;
+}
+
+/*
+ * This initializes the TCM memory
+ */
+void __init tcm_init(void)
+{
+	u32 tcm_status;
+	u8 dtcm_banks;
+	u8 itcm_banks;
+	size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
+	size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
+	char *start;
+	char *end;
+	char *ram;
+	int ret;
+	int i;
+
+	/*
+	 * Prior to ARMv5 there is no TCM, and trying to read the status
+	 * register will hang the processor.
+	 */
+	if (cpu_architecture() < CPU_ARCH_ARMv5) {
+		if (dtcm_code_sz || itcm_code_sz)
+			pr_info("CPU TCM: %u bytes of DTCM and %u bytes of "
+				"ITCM code compiled in, but no TCM present "
+				"in pre-v5 CPU\n", dtcm_code_sz, itcm_code_sz);
+		return;
+	}
+
+	tcm_status = read_cpuid_tcmstatus();
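+	/*
+	 * The TCM type register reports the DTCM bank count in bits
+	 * [18:16] and the ITCM bank count in bits [2:0].
+	 */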
+	dtcm_banks = (tcm_status >> 16) & 0x03;
+	itcm_banks = (tcm_status & 0x03);
+
+	/* Values greater than 2 for D/ITCM banks are "reserved" */
+	if (dtcm_banks > 2)
+		dtcm_banks = 0;
+	if (itcm_banks > 2)
+		itcm_banks = 0;
+
+	/* Setup DTCM if present */
+	if (dtcm_banks > 0) {
+		for (i = 0; i < dtcm_banks; i++) {
+			ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end);
+			if (ret)
+				return;
+		}
+		/* This means you compiled more code than fits into DTCM */
+		if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
+			pr_info("CPU DTCM: %u bytes of code compiled to "
+				"DTCM but only %lu bytes of DTCM present\n",
+				dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
+			goto no_dtcm;
+		}
+		dtcm_res.end = dtcm_end - 1;
+		request_resource(&iomem_resource, &dtcm_res);
+		dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
+		iotable_init(dtcm_iomap, 1);
+		/* Copy data from RAM to DTCM */
+		start = &__sdtcm_data;
+		end   = &__edtcm_data;
+		ram   = &__dtcm_start;
+		memcpy(start, ram, dtcm_code_sz);
+		pr_debug("CPU DTCM: copied data from %p - %p\n",
+			 start, end);
+		dtcm_present = true;
+	} else if (dtcm_code_sz) {
+		pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no "
+			"DTCM banks present in CPU\n", dtcm_code_sz);
+	}
+
+no_dtcm:
+	/* Setup ITCM if present */
+	if (itcm_banks > 0) {
+		for (i = 0; i < itcm_banks; i++) {
+			ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end);
+			if (ret)
+				return;
+		}
+		/* This means you compiled more code than fits into ITCM */
+		if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
+			pr_info("CPU ITCM: %u bytes of code compiled to "
+				"ITCM but only %lu bytes of ITCM present\n",
+				itcm_code_sz, (itcm_end - ITCM_OFFSET));
+			return;
+		}
+		itcm_res.end = itcm_end - 1;
+		request_resource(&iomem_resource, &itcm_res);
+		itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
+		iotable_init(itcm_iomap, 1);
+		/* Copy code from RAM to ITCM */
+		start = &__sitcm_text;
+		end   = &__eitcm_text;
+		ram   = &__itcm_start;
+		memcpy(start, ram, itcm_code_sz);
+		pr_debug("CPU ITCM: copied code from %p - %p\n",
+			 start, end);
+		itcm_present = true;
+	} else if (itcm_code_sz) {
+		pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no "
+			"ITCM banks present in CPU\n", itcm_code_sz);
+	}
+}
+
+/*
+ * This creates the TCM memory pool and has to be done later,
+ * during core_initcalls, since the allocator is not yet
+ * up and running when the first initialization runs.
+ */
+static int __init setup_tcm_pool(void)
+{
+	u32 dtcm_pool_start = (u32) &__edtcm_data;
+	u32 itcm_pool_start = (u32) &__eitcm_text;
+	int ret;
+
+	/*
+	 * Set up malloc pool, 2^2 = 4 bytes granularity since
+	 * the TCM is sometimes just 4 KiB. NB: page and cache
+	 * line alignment does not matter in TCM!
+	 */
+	tcm_pool = gen_pool_create(2, -1);
+
+	pr_debug("Setting up TCM memory pool\n");
+
+	/* Add the rest of DTCM to the TCM pool */
+	if (dtcm_present) {
+		if (dtcm_pool_start < dtcm_end) {
+			ret = gen_pool_add(tcm_pool, dtcm_pool_start,
+					   dtcm_end - dtcm_pool_start, -1);
+			if (ret) {
+				pr_err("CPU DTCM: could not add DTCM " \
+				       "remainder to pool!\n");
+				return ret;
+			}
+			pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \
+				 "the TCM memory pool\n",
+				 dtcm_end - dtcm_pool_start,
+				 dtcm_pool_start);
+		}
+	}
+
+	/* Add the rest of ITCM to the TCM pool */
+	if (itcm_present) {
+		if (itcm_pool_start < itcm_end) {
+			ret = gen_pool_add(tcm_pool, itcm_pool_start,
+					   itcm_end - itcm_pool_start, -1);
+			if (ret) {
+				pr_err("CPU ITCM: could not add ITCM " \
+				       "remainder to pool!\n");
+				return ret;
+			}
+			pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \
+				 "the TCM memory pool\n",
+				 itcm_end - itcm_pool_start,
+				 itcm_pool_start);
+		}
+	}
+	return 0;
+}
+
+core_initcall(setup_tcm_pool);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/tcm.h b/ap/os/linux/linux-3.4.x/arch/arm/kernel/tcm.h
new file mode 100644
index 0000000..8015ad4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/tcm.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#ifdef CONFIG_HAVE_TCM
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+inline void tcm_init(void)
+{
+}
+#endif
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/thumbee.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/thumbee.c
new file mode 100644
index 0000000..aab8997
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/thumbee.c
@@ -0,0 +1,82 @@
+/*
+ * arch/arm/kernel/thumbee.c
+ *
+ * Copyright (C) 2008 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/system_info.h>
+#include <asm/thread_notify.h>
+
+/*
+ * Access to the ThumbEE Handler Base register
+ */
+static inline unsigned long teehbr_read(void)
+{
+	unsigned long v;
+	asm("mrc	p14, 6, %0, c1, c0, 0\n" : "=r" (v));
+	return v;
+}
+
+static inline void teehbr_write(unsigned long v)
+{
+	asm("mcr	p14, 6, %0, c1, c0, 0\n" : : "r" (v));
+}
+
+static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		thread->thumbee_state = 0;
+		break;
+	case THREAD_NOTIFY_SWITCH:
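+		/*
+		 * Save the outgoing thread's handler base and install the
+		 * incoming thread's saved value ('thread' is the thread
+		 * being switched to here).
+		 */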
+		current_thread_info()->thumbee_state = teehbr_read();
+		teehbr_write(thread->thumbee_state);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block thumbee_notifier_block = {
+	.notifier_call	= thumbee_notifier,
+};
+
+static int __init thumbee_init(void)
+{
+	unsigned long pfr0;
+	unsigned int cpu_arch = cpu_architecture();
+
+	if (cpu_arch < CPU_ARCH_ARMv7)
+		return 0;
+
+	/* processor feature register 0 */
+	asm("mrc	p15, 0, %0, c0, c1, 0\n" : "=r" (pfr0));
+	if ((pfr0 & 0x0000f000) != 0x00001000)
+		return 0;
+
+	printk(KERN_INFO "ThumbEE CPU extension supported.\n");
+	elf_hwcap |= HWCAP_THUMBEE;
+	thread_register_notifier(&thumbee_notifier_block);
+
+	return 0;
+}
+
+late_initcall(thumbee_init);
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/time.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/time.c
new file mode 100644
index 0000000..fe31b22
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/time.c
@@ -0,0 +1,152 @@
+/*
+ *  linux/arch/arm/kernel/time.c
+ *
+ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ *  Modifications for ARM (C) 1994-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This file contains the ARM-specific time handling details:
+ *  reading the RTC at bootup, etc...
+ */
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/timex.h>
+#include <linux/errno.h>
+#include <linux/profile.h>
+#include <linux/syscore_ops.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
+
+#include <asm/leds.h>
+#include <asm/thread_info.h>
+#include <asm/sched_clock.h>
+#include <asm/stacktrace.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+
+/*
+ * Our system timer.
+ */
+static struct sys_timer *system_timer;
+
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \
+    defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE)
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
+#endif	/* pc-style 'CMOS' RTC support */
+
+/* change this if you have some constant time drift */
+#define USECS_PER_JIFFY	(1000000/HZ)
+
+#ifdef CONFIG_SMP
+unsigned long profile_pc(struct pt_regs *regs)
+{
+	struct stackframe frame;
+
+	if (!in_lock_functions(regs->ARM_pc))
+		return regs->ARM_pc;
+
+	frame.fp = regs->ARM_fp;
+	frame.sp = regs->ARM_sp;
+	frame.lr = regs->ARM_lr;
+	frame.pc = regs->ARM_pc;
+	do {
+		int ret = unwind_frame(&frame);
+		if (ret < 0)
+			return 0;
+	} while (in_lock_functions(frame.pc));
+
+	return frame.pc;
+}
+EXPORT_SYMBOL(profile_pc);
+#endif
+
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+u32 arch_gettimeoffset(void)
+{
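+	/* ->offset() returns microseconds; this hook must return nanoseconds */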
+	if (system_timer->offset != NULL)
+		return system_timer->offset() * 1000;
+
+	return 0;
+}
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
+
+#ifdef CONFIG_LEDS_TIMER
+static inline void do_leds(void)
+{
+	static unsigned int count = HZ/2;
+
+	if (--count == 0) {
+		count = HZ/2;
+		leds_event(led_timer);
+	}
+}
+#else
+#define	do_leds()
+#endif
+
+
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
+/*
+ * Kernel system timer support.
+ */
+void timer_tick(void)
+{
+	profile_tick(CPU_PROFILING);
+	do_leds();
+	xtime_update(1);
+#ifndef CONFIG_SMP
+	update_process_times(user_mode(get_irq_regs()));
+#endif
+}
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS)
+static int timer_suspend(void)
+{
+	if (system_timer->suspend)
+		system_timer->suspend();
+
+	return 0;
+}
+
+static void timer_resume(void)
+{
+	if (system_timer->resume)
+		system_timer->resume();
+}
+#else
+#define timer_suspend NULL
+#define timer_resume NULL
+#endif
+
+static struct syscore_ops timer_syscore_ops = {
+	.suspend	= timer_suspend,
+	.resume		= timer_resume,
+};
+
+static int __init timer_init_syscore_ops(void)
+{
+	register_syscore_ops(&timer_syscore_ops);
+
+	return 0;
+}
+
+device_initcall(timer_init_syscore_ops);
+
+void __init time_init(void)
+{
+	system_timer = machine_desc->timer;
+	system_timer->init();
+	sched_clock_postinit();
+}
+
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/topology.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/topology.c
new file mode 100644
index 0000000..140c817
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/topology.c
@@ -0,0 +1,150 @@
+/*
+ * arch/arm/kernel/topology.c
+ *
+ * Copyright (C) 2011 Linaro Limited.
+ * Written by: Vincent Guittot
+ *
+ * based on arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/sched.h>
+
+#include <asm/cputype.h>
+#include <asm/topology.h>
+
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
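+/*
+ * Bit 31 set marks the multiprocessing-extensions MPIDR format and a
+ * clear U bit (bit 30) marks a multiprocessor system; the MT bit (24)
+ * is set when the lowest affinity level refers to hardware threads.
+ */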
+
+/*
+ * These masks reflect the current use of the affinity levels.
+ * The affinity level can be up to 16 bits according to ARM ARM
+ */
+
+#define MPIDR_LEVEL0_MASK 0x3
+#define MPIDR_LEVEL0_SHIFT 0
+
+#define MPIDR_LEVEL1_MASK 0xF
+#define MPIDR_LEVEL1_SHIFT 8
+
+#define MPIDR_LEVEL2_MASK 0xFF
+#define MPIDR_LEVEL2_SHIFT 16
+
+struct cputopo_arm cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+	return &cpu_topology[cpu].core_sibling;
+}
+
+/*
+ * store_cpu_topology is called at boot when only one CPU is running,
+ * and later with the cpu_hotplug.lock mutex held once several CPUs
+ * have booted, which prevents simultaneous write access to the
+ * cpu_topology array.
+ */
+void store_cpu_topology(unsigned int cpuid)
+{
+	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+	unsigned int mpidr;
+	unsigned int cpu;
+
+	/* If the cpu topology has already been set, just return */
+	if (cpuid_topo->core_id != -1)
+		return;
+
+	mpidr = read_cpuid_mpidr();
+
+	/* create cpu topology mapping */
+	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
+		/*
+		 * This is a multiprocessor system:
+		 * the multiprocessor format and mode fields are both set.
+		 */
+
+		if (mpidr & MPIDR_MT_BITMASK) {
+			/* core performance interdependency */
+			cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+				& MPIDR_LEVEL0_MASK;
+			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+				& MPIDR_LEVEL1_MASK;
+			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
+				& MPIDR_LEVEL2_MASK;
+		} else {
+			/* largely independent cores */
+			cpuid_topo->thread_id = -1;
+			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+				& MPIDR_LEVEL0_MASK;
+			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+				& MPIDR_LEVEL1_MASK;
+		}
+	} else {
+		/*
+		 * This is a uniprocessor system: either the MPIDR uses the
+		 * multiprocessor format but reports a uniprocessor system,
+		 * or it uses the old uniprocessor format.
+		 */
+		cpuid_topo->thread_id = -1;
+		cpuid_topo->core_id = 0;
+		cpuid_topo->socket_id = -1;
+	}
+
+	/* update core and thread sibling masks */
+	for_each_possible_cpu(cpu) {
+		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
+
+		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
+			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+			if (cpu != cpuid)
+				cpumask_set_cpu(cpu,
+					&cpuid_topo->core_sibling);
+
+			if (cpuid_topo->core_id == cpu_topo->core_id) {
+				cpumask_set_cpu(cpuid,
+					&cpu_topo->thread_sibling);
+				if (cpu != cpuid)
+					cpumask_set_cpu(cpu,
+						&cpuid_topo->thread_sibling);
+			}
+		}
+	}
+	smp_wmb();
+
+	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+		cpuid, cpu_topology[cpuid].thread_id,
+		cpu_topology[cpuid].core_id,
+		cpu_topology[cpuid].socket_id, mpidr);
+}
+
+/*
+ * init_cpu_topology is called at boot when only one CPU is running,
+ * which prevents simultaneous write access to the cpu_topology array.
+ */
+void init_cpu_topology(void)
+{
+	unsigned int cpu;
+
+	/* init core mask */
+	for_each_possible_cpu(cpu) {
+		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
+
+		cpu_topo->thread_id = -1;
+		cpu_topo->core_id =  -1;
+		cpu_topo->socket_id = -1;
+		cpumask_clear(&cpu_topo->core_sibling);
+		cpumask_clear(&cpu_topo->thread_sibling);
+	}
+	smp_wmb();
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/traps.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/traps.c
new file mode 100644
index 0000000..fb3912c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/traps.c
@@ -0,0 +1,839 @@
+/*
+ *  linux/arch/arm/kernel/traps.c
+ *
+ *  Copyright (C) 1995-2009 Russell King
+ *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  'traps.c' handles hardware exceptions after we have saved some state in
+ *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
+ *  kill the offending process.
+ */
+#include <linux/signal.h>
+#include <linux/personality.h>
+#include <linux/kallsyms.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/exception.h>
+#include <asm/unistd.h>
+#include <asm/traps.h>
+#include <asm/unwind.h>
+#include <asm/tls.h>
+#include <asm/system_misc.h>
+
+#include "signal.h"
+
+static const char *handler[] = {
+	"prefetch abort",
+	"data abort",
+	"address exception",
+	"interrupt",
+	"undefined instruction",
+};
+
+void *vectors_page;
+
+#ifdef CONFIG_DEBUG_USER
+unsigned int user_debug;
+
+static int __init user_debug_setup(char *str)
+{
+	get_option(&str, &user_debug);
+	return 1;
+}
+__setup("user_debug=", user_debug_setup);
+#endif
+
+static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+
+void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+{
+#ifdef CONFIG_KALLSYMS
+	printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+#else
+	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+#endif
+
+	if (in_exception_text(where))
+		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+}
+
+#ifndef CONFIG_ARM_UNWIND
+/*
+ * Stack pointers should always be within the kernel's view of
+ * physical memory.  If a stack pointer is not, then we can't dump
+ * out any information relating to the stack.
+ */
+static int verify_stack(unsigned long sp)
+{
+	if (sp < PAGE_OFFSET ||
+	    (sp > (unsigned long)high_memory && high_memory != NULL))
+		return -EFAULT;
+
+	return 0;
+}
+#endif
+
+/*
+ * Dump out the contents of some memory nicely...
+ */
+static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+		     unsigned long top)
+{
+	unsigned long first;
+	mm_segment_t fs;
+	int i;
+
+	/*
+	 * We need to widen the address limit (set_fs) so that __get_user
+	 * can safely read from kernel space.
+	 */
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
+
+	for (first = bottom & ~31; first < top; first += 32) {
+		unsigned long p;
+		char str[sizeof(" 12345678") * 8 + 1];
+
+		memset(str, ' ', sizeof(str));
+		str[sizeof(str) - 1] = '\0';
+
+		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+			if (p >= bottom && p < top) {
+				unsigned long val;
+				if (__get_user(val, (unsigned long *)p) == 0)
+					sprintf(str + i * 9, " %08lx", val);
+				else
+					sprintf(str + i * 9, " ????????");
+			}
+		}
+		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
+	}
+
+	set_fs(fs);
+}
+
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+	unsigned long addr = instruction_pointer(regs);
+	const int thumb = thumb_mode(regs);
+	const int width = thumb ? 4 : 8;
+	mm_segment_t fs;
+	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+	int i;
+
+	/*
+	 * We need to widen the address limit (set_fs) so that __get_user
+	 * can safely read from kernel space.  Note that we dump the
+	 * code first, just in case the backtrace kills us.
+	 */
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	for (i = -4; i < 1 + !!thumb; i++) {
+		unsigned int val, bad;
+
+		if (thumb)
+			bad = __get_user(val, &((u16 *)addr)[i]);
+		else
+			bad = __get_user(val, &((u32 *)addr)[i]);
+
+		if (!bad)
+			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+					width, val);
+		else {
+			p += sprintf(p, "bad PC value");
+			break;
+		}
+	}
+	printk("%sCode: %s\n", lvl, str);
+
+	set_fs(fs);
+}
+
+#ifdef CONFIG_ARM_UNWIND
+static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+{
+	unwind_backtrace(regs, tsk);
+}
+#else
+static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+{
+	unsigned int fp, mode;
+	int ok = 1;
+
+	printk("Backtrace: ");
+
+	if (!tsk)
+		tsk = current;
+
+	if (regs) {
+		fp = regs->ARM_fp;
+		mode = processor_mode(regs);
+	} else if (tsk != current) {
+		fp = thread_saved_fp(tsk);
+		mode = 0x10;
+	} else {
+		asm("mov %0, fp" : "=r" (fp) : : "cc");
+		mode = 0x10;
+	}
+
+	if (!fp) {
+		printk("no frame pointer");
+		ok = 0;
+	} else if (verify_stack(fp)) {
+		printk("invalid frame pointer 0x%08x", fp);
+		ok = 0;
+	} else if (fp < (unsigned long)end_of_stack(tsk))
+		printk("frame pointer underflow");
+	printk("\n");
+
+	if (ok)
+		c_backtrace(fp, mode);
+}
+#endif
+
+void dump_stack(void)
+{
+#ifndef CONFIG_MIN_8M_VERSION
+	dump_backtrace(NULL, NULL);
+#endif
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+#ifndef CONFIG_MIN_8M_VERSION
+	dump_backtrace(NULL, tsk);
+	barrier();
+#endif
+}
+
+#ifdef CONFIG_PREEMPT
+#define S_PREEMPT " PREEMPT"
+#else
+#define S_PREEMPT ""
+#endif
+#ifdef CONFIG_SMP
+#define S_SMP " SMP"
+#else
+#define S_SMP ""
+#endif
+#ifdef CONFIG_THUMB2_KERNEL
+#define S_ISA " THUMB2"
+#else
+#define S_ISA " ARM"
+#endif
+
+static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
+{
+	struct task_struct *tsk = thread->task;
+	static int die_counter;
+	int ret;
+
+	printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP
+	       S_ISA "\n", str, err, ++die_counter);
+
+	/* trap and error numbers are mostly meaningless on ARM */
+	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
+	if (ret == NOTIFY_STOP)
+		return ret;
+
+	print_modules();
+	__show_regs(regs);
+	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
+		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+
+	if (!user_mode(regs) || in_interrupt()) {
+		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
+			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+#ifndef CONFIG_MIN_8M_VERSION
+		dump_backtrace(regs, tsk);
+#endif
+		dump_instr(KERN_EMERG, regs);
+	}
+
+	return ret;
+}
+
+static DEFINE_RAW_SPINLOCK(die_lock);
+
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+	struct thread_info *thread = current_thread_info();
+	int ret;
+	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
+
+	oops_enter();
+
+	raw_spin_lock_irq(&die_lock);
+	console_verbose();
+	bust_spinlocks(1);
+	if (!user_mode(regs))
+		bug_type = report_bug(regs->ARM_pc, regs);
+	if (bug_type != BUG_TRAP_TYPE_NONE)
+		str = "Oops - BUG";
+	ret = __die(str, err, thread, regs);
+
+	if (regs && kexec_should_crash(thread->task))
+		crash_kexec(regs);
+
+	bust_spinlocks(0);
+	add_taint(TAINT_DIE);
+	raw_spin_unlock_irq(&die_lock);
+	oops_exit();
+
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+	if (panic_on_oops)
+		panic("Fatal exception");
+	if (ret != NOTIFY_STOP)
+		do_exit(SIGSEGV);
+}
+
+void arm_notify_die(const char *str, struct pt_regs *regs,
+		struct siginfo *info, unsigned long err, unsigned long trap)
+{
+	if (user_mode(regs)) {
+		current->thread.error_code = err;
+		current->thread.trap_no = trap;
+
+		force_sig_info(info->si_signo, info, current);
+	} else {
+		die(str, regs, err);
+	}
+}
+
+#ifdef CONFIG_GENERIC_BUG
+
+int is_valid_bugaddr(unsigned long pc)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+	unsigned short bkpt;
+#else
+	unsigned long bkpt;
+#endif
+
+	if (probe_kernel_address((unsigned *)pc, bkpt))
+		return 0;
+
+	return bkpt == BUG_INSTR_VALUE;
+}
+
+#endif
+
+static LIST_HEAD(undef_hook);
+static DEFINE_RAW_SPINLOCK(undef_lock);
+
+void register_undef_hook(struct undef_hook *hook)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&undef_lock, flags);
+	list_add(&hook->node, &undef_hook);
+	raw_spin_unlock_irqrestore(&undef_lock, flags);
+}
+
+void unregister_undef_hook(struct undef_hook *hook)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&undef_lock, flags);
+	list_del(&hook->node);
+	raw_spin_unlock_irqrestore(&undef_lock, flags);
+}
+
+static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+{
+	struct undef_hook *hook;
+	unsigned long flags;
+	int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
+
+	raw_spin_lock_irqsave(&undef_lock, flags);
+	list_for_each_entry(hook, &undef_hook, node)
+		if ((instr & hook->instr_mask) == hook->instr_val &&
+		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
+			fn = hook->fn;
+	raw_spin_unlock_irqrestore(&undef_lock, flags);
+
+	return fn ? fn(regs, instr) : 1;
+}
+
+asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+{
+	unsigned int instr;
+	siginfo_t info;
+	void __user *pc;
+
+	pc = (void __user *)instruction_pointer(regs);
+
+	if (processor_mode(regs) == SVC_MODE) {
+#ifdef CONFIG_THUMB2_KERNEL
+		if (thumb_mode(regs)) {
+			instr = ((u16 *)pc)[0];
+			if (is_wide_instruction(instr)) {
+				instr <<= 16;
+				instr |= ((u16 *)pc)[1];
+			}
+		} else
+#endif
+			instr = *(u32 *) pc;
+	} else if (thumb_mode(regs)) {
+		if (get_user(instr, (u16 __user *)pc))
+			goto die_sig;
+		if (is_wide_instruction(instr)) {
+			unsigned int instr2;
+			if (get_user(instr2, (u16 __user *)pc+1))
+				goto die_sig;
+			instr <<= 16;
+			instr |= instr2;
+		}
+	} else if (get_user(instr, (u32 __user *)pc)) {
+		goto die_sig;
+	}
+
+	if (call_undef_hook(regs, instr) == 0)
+		return;
+
+die_sig:
+#ifdef CONFIG_DEBUG_USER
+	if (user_debug & UDBG_UNDEFINED) {
+		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
+			current->comm, task_pid_nr(current), pc);
+		dump_instr(KERN_INFO, regs);
+	}
+#endif
+
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code  = ILL_ILLOPC;
+	info.si_addr  = pc;
+
+	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
+}
+
+asmlinkage void do_unexp_fiq (struct pt_regs *regs)
+{
+	printk("Hmm.  Unexpected FIQ received, but trying to continue\n");
+	printk("You may have a hardware problem...\n");
+}
+
+/*
+ * bad_mode handles the impossible case in the vectors.  If you see one of
+ * these, then it's extremely serious, and could mean you have buggy hardware.
+ * It never returns, and never tries to sync.  We hope that we can at least
+ * dump out some state information...
+ */
+asmlinkage void bad_mode(struct pt_regs *regs, int reason)
+{
+	console_verbose();
+
+	printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]);
+
+	die("Oops - bad mode", regs, 0);
+	local_irq_disable();
+	panic("bad mode");
+}
+
+static int bad_syscall(int n, struct pt_regs *regs)
+{
+	struct thread_info *thread = current_thread_info();
+	siginfo_t info;
+
+	if ((current->personality & PER_MASK) != PER_LINUX &&
+	    thread->exec_domain->handler) {
+		thread->exec_domain->handler(n, regs);
+		return regs->ARM_r0;
+	}
+
+#ifdef CONFIG_DEBUG_USER
+	if (user_debug & UDBG_SYSCALL) {
+		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
+			task_pid_nr(current), current->comm, n);
+		dump_instr(KERN_ERR, regs);
+	}
+#endif
+
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code  = ILL_ILLTRP;
+	info.si_addr  = (void __user *)instruction_pointer(regs) -
+			 (thumb_mode(regs) ? 2 : 4);
+
+	arm_notify_die("Oops - bad syscall", regs, &info, n, 0);
+
+	return regs->ARM_r0;
+}
+
+static inline void
+do_cache_op(unsigned long start, unsigned long end, int flags)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *vma;
+
+	if (end < start || flags)
+		return;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, start);
+	if (vma && vma->vm_start < end) {
+		if (start < vma->vm_start)
+			start = vma->vm_start;
+		if (end > vma->vm_end)
+			end = vma->vm_end;
+
+		up_read(&mm->mmap_sem);
+		flush_cache_user_range(start, end);
+		return;
+	}
+	up_read(&mm->mmap_sem);
+}
+
+/*
+ * Handle all unrecognised system calls.
+ *  0x9f0000 - 0x9fffff are some more esoteric system calls
+ */
+#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
+asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+{
+	struct thread_info *thread = current_thread_info();
+	siginfo_t info;
+
+	if ((no >> 16) != (__ARM_NR_BASE >> 16))
+		return bad_syscall(no, regs);
+
+	switch (no & 0xffff) {
+	case 0: /* branch through 0 */
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code  = SEGV_MAPERR;
+		info.si_addr  = NULL;
+
+		arm_notify_die("branch through zero", regs, &info, 0, 0);
+		return 0;
+
+	case NR(breakpoint): /* SWI BREAK_POINT */
+		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
+		ptrace_break(current, regs);
+		return regs->ARM_r0;
+
+	/*
+	 * Flush a region from virtual address 'r0' to virtual address 'r1'
+	 * _exclusive_.  There is no alignment requirement on either address;
+	 * user space does not need to know the hardware cache layout.
+	 *
+	 * r2 contains flags.  It should ALWAYS be passed as ZERO until it
+	 * is defined to be something else.  For now we ignore it, but may
+	 * the fires of hell burn in your belly if you break this rule. ;)
+	 *
+	 * (at a later date, we may want to allow this call to not flush
+	 * various aspects of the cache.  Passing '0' will guarantee that
+	 * everything necessary gets flushed to maintain consistency in
+	 * the specified region).
+	 */
+	case NR(cacheflush):
+		do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
+		return 0;
+
+	case NR(usr26):
+		if (!(elf_hwcap & HWCAP_26BIT))
+			break;
+		regs->ARM_cpsr &= ~MODE32_BIT;
+		return regs->ARM_r0;
+
+	case NR(usr32):
+		if (!(elf_hwcap & HWCAP_26BIT))
+			break;
+		regs->ARM_cpsr |= MODE32_BIT;
+		return regs->ARM_r0;
+
+	case NR(set_tls):
+		/* Fix for HUB: CVE-2014-9870 */
+		thread->tp_value[0] = regs->ARM_r0;
+		if (tls_emu)
+			return 0;
+		if (has_tls_reg) {
+			asm ("mcr p15, 0, %0, c13, c0, 3"
+				: : "r" (regs->ARM_r0));
+		} else {
+			/*
+			 * User space must never try to access this directly.
+			 * Expect your app to break eventually if you do so.
+			 * The user helper at 0xffff0fe0 must be used instead.
+			 * (see entry-armv.S for details)
+			 */
+			*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
+		}
+		return 0;
+
+#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+	/*
+	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
+	 * Return zero in r0 if *r2 was changed or non-zero if no exchange
+	 * happened.  Also set the user C flag accordingly.
+	 * If access permissions have to be fixed up then non-zero is
+	 * returned and the operation has to be re-attempted.
+	 *
+	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
+	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
+	 * existence.  Don't ever use this from user code.
+	 */
+	case NR(cmpxchg):
+	for (;;) {
+		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
+					 struct pt_regs *regs);
+		unsigned long val;
+		unsigned long addr = regs->ARM_r2;
+		struct mm_struct *mm = current->mm;
+		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+		spinlock_t *ptl;
+
+		regs->ARM_cpsr &= ~PSR_C_BIT;
+		down_read(&mm->mmap_sem);
+		pgd = pgd_offset(mm, addr);
+		if (!pgd_present(*pgd))
+			goto bad_access;
+		pmd = pmd_offset(pgd, addr);
+		if (!pmd_present(*pmd))
+			goto bad_access;
+		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+		if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
+			pte_unmap_unlock(pte, ptl);
+			goto bad_access;
+		}
+		val = *(unsigned long *)addr;
+		val -= regs->ARM_r0;
+		if (val == 0) {
+			*(unsigned long *)addr = regs->ARM_r1;
+			regs->ARM_cpsr |= PSR_C_BIT;
+		}
+		pte_unmap_unlock(pte, ptl);
+		up_read(&mm->mmap_sem);
+		return val;
+
+		bad_access:
+		up_read(&mm->mmap_sem);
+		/* simulate a write access fault */
+		do_DataAbort(addr, 15 + (1 << 11), regs);
+	}
+#endif
+
+	default:
+		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
+		   if not implemented, rather than raising SIGILL.  This
+		   way the calling program can gracefully determine whether
+		   a feature is supported.  */
+		if ((no & 0xffff) <= 0x7ff)
+			return -ENOSYS;
+		break;
+	}
+#ifdef CONFIG_DEBUG_USER
+	/*
+	 * experience shows that these seem to indicate that
+	 * something catastrophic has happened
+	 */
+	if (user_debug & UDBG_SYSCALL) {
+		printk("[%d] %s: arm syscall %d\n",
+		       task_pid_nr(current), current->comm, no);
+		dump_instr("", regs);
+		if (user_mode(regs)) {
+			__show_regs(regs);
+			c_backtrace(regs->ARM_fp, processor_mode(regs));
+		}
+	}
+#endif
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code  = ILL_ILLTRP;
+	info.si_addr  = (void __user *)instruction_pointer(regs) -
+			 (thumb_mode(regs) ? 2 : 4);
+
+	arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
+	return 0;
+}
+
+#ifdef CONFIG_TLS_REG_EMUL
+
+/*
+ * We might be running on an ARMv6+ processor which should have the TLS
+ * register but for some reason we can't use it, or maybe an SMP system
+ * using a pre-ARMv6 processor (there are apparently a few prototypes like
+ * that in existence) and therefore access to that register must be
+ * emulated.
+ */
+
+static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
+{
+	int reg = (instr >> 12) & 15;
+	if (reg == 15)
+		return 1;
+
+	/* Fix for HUB: CVE-2014-9870 */
+	regs->uregs[reg] = current_thread_info()->tp_value[0];
+	regs->ARM_pc += 4;
+	return 0;
+}
+
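+/*
+ * The mask/value pair below matches "mrc p15, 0, Rd, c13, c0, 3" for
+ * any Rd, i.e. a user-space read of the TLS register, and only in ARM
+ * (not Thumb) state as required by the cpsr_mask/cpsr_val pair.
+ */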
+static struct undef_hook arm_mrc_hook = {
+	.instr_mask	= 0x0fff0fff,
+	.instr_val	= 0x0e1d0f70,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= 0,
+	.fn		= get_tp_trap,
+};
+
+static int __init arm_mrc_hook_init(void)
+{
+	register_undef_hook(&arm_mrc_hook);
+	return 0;
+}
+
+late_initcall(arm_mrc_hook_init);
+
+#endif
+
+void __bad_xchg(volatile void *ptr, int size)
+{
+	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
+		__builtin_return_address(0), ptr, size);
+	BUG();
+}
+EXPORT_SYMBOL(__bad_xchg);
+
+/*
+ * A data abort trap was taken, but we did not handle the instruction.
+ * Try to abort the user program, or panic if it was the kernel.
+ */
+asmlinkage void
+baddataabort(int code, unsigned long instr, struct pt_regs *regs)
+{
+	unsigned long addr = instruction_pointer(regs);
+	siginfo_t info;
+
+#ifdef CONFIG_DEBUG_USER
+	if (user_debug & UDBG_BADABORT) {
+		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
+			task_pid_nr(current), current->comm, code, instr);
+		dump_instr(KERN_ERR, regs);
+		show_pte(current->mm, addr);
+	}
+#endif
+
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code  = ILL_ILLOPC;
+	info.si_addr  = (void __user *)addr;
+
+	arm_notify_die("unknown data abort code", regs, &info, instr, 0);
+}
+
+void __readwrite_bug(const char *fn)
+{
+	printk("%s called, but not implemented\n", fn);
+	BUG();
+}
+EXPORT_SYMBOL(__readwrite_bug);
+
+void __pte_error(const char *file, int line, pte_t pte)
+{
+	printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
+}
+
+void __pmd_error(const char *file, int line, pmd_t pmd)
+{
+	printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
+}
+
+void __pgd_error(const char *file, int line, pgd_t pgd)
+{
+	printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
+}
+
+asmlinkage void __div0(void)
+{
+	printk("Division by zero in kernel.\n");
+	dump_stack();
+}
+EXPORT_SYMBOL(__div0);
+
+void abort(void)
+{
+	BUG();
+
+	/* if that doesn't kill us, halt */
+	panic("Oops failed to kill thread");
+}
+EXPORT_SYMBOL(abort);
+
+void __init trap_init(void)
+{
+	return;
+}
+
+static void __init kuser_get_tls_init(unsigned long vectors)
+{
+	/*
+	 * vectors + 0xfe0 = __kuser_get_tls
+	 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
+	 */
+	if (tls_emu || has_tls_reg)
+		memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+}
+
+void __init early_trap_init(void *vectors_base)
+{
+	unsigned long vectors = (unsigned long)vectors_base;
+	extern char __stubs_start[], __stubs_end[];
+	extern char __vectors_start[], __vectors_end[];
+	extern char __kuser_helper_start[], __kuser_helper_end[];
+	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+	vectors_page = vectors_base;
+
+	/*
+	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
+	 * into the vector page, mapped at 0xffff0000, and ensure these
+	 * are visible to the instruction stream.
+	 */
+	memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+	memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
+	memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
+	/*
+	 * Do processor specific fixups for the kuser helpers
+	 */
+	kuser_get_tls_init(vectors);
+
+	/*
+	 * Copy signal return handlers into the vector page, and
+	 * set sigreturn to be a pointer to these.
+	 */
+	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
+	       sigreturn_codes, sizeof(sigreturn_codes));
+	memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+	       syscall_restart_code, sizeof(syscall_restart_code));
+
+	flush_icache_range(vectors, vectors + PAGE_SIZE);
+	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/unwind.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/unwind.c
new file mode 100755
index 0000000..b24696d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/unwind.c
@@ -0,0 +1,491 @@
+/*
+ * arch/arm/kernel/unwind.c
+ *
+ * Copyright (C) 2008 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Stack unwinding support for ARM
+ *
+ * An ARM EABI version of gcc is required to generate the unwind
+ * tables. For information about the structure of the unwind tables,
+ * see "Exception Handling ABI for the ARM Architecture" at:
+ *
+ * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
+ */
+
+#ifndef __CHECKER__
+#if !defined (__ARM_EABI__)
+#warning Your compiler does not have EABI support.
+#warning    ARM unwind is known to compile only with EABI compilers.
+#warning    Change compiler or disable ARM_UNWIND option.
+#elif (__GNUC__ == 4 && __GNUC_MINOR__ <= 2)
+#warning Your compiler is too buggy; it is known to not compile ARM unwind support.
+#warning    Change compiler or disable ARM_UNWIND option.
+#endif
+#endif /* __CHECKER__ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+#include <asm/stacktrace.h>
+#include <asm/traps.h>
+#include <asm/unwind.h>
+
+/* Dummy functions to avoid linker complaints */
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0);
+
+void __aeabi_unwind_cpp_pr1(void)
+{
+}
+EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1);
+
+void __aeabi_unwind_cpp_pr2(void)
+{
+}
+EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
+
+struct unwind_ctrl_block {
+	unsigned long vrs[16];		/* virtual register set */
+	const unsigned long *insn;	/* pointer to the current instructions word */
+	int entries;			/* number of entries left to interpret */
+	int byte;			/* current byte number in the instructions word */
+};
+
+enum regs {
+#ifdef CONFIG_THUMB2_KERNEL
+	FP = 7,
+#else
+	FP = 11,
+#endif
+	SP = 13,
+	LR = 14,
+	PC = 15
+};
+
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
+
+static DEFINE_RAW_SPINLOCK(unwind_lock);
+static LIST_HEAD(unwind_tables);
+
+/* Convert a prel31 symbol to an absolute address */
+#define prel31_to_addr(ptr)				\
+({							\
+	/* sign-extend to 32 bits */			\
+	long offset = (((long)*(ptr)) << 1) >> 1;	\
+	(unsigned long)(ptr) + offset;			\
+})
+
+/*
+ * Binary search in the unwind index. The entries are
+ * guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
+ */
+static const struct unwind_idx *search_index(unsigned long addr,
+				       const struct unwind_idx *start,
+				       const struct unwind_idx *origin,
+				       const struct unwind_idx *stop)
+{
+	unsigned long addr_prel31;
+
+	pr_debug("%s(%08lx, %p, %p, %p)\n",
+			__func__, addr, start, origin, stop);
+
+	/*
+	 * only search in the section with the matching sign. This way the
+	 * prel31 numbers can be compared as unsigned longs.
+	 */
+	if (addr < (unsigned long)start)
+		/* negative offsets: [start; origin) */
+		stop = origin;
+	else
+		/* positive offsets: [origin; stop) */
+		start = origin;
+
+	/* prel31 for address relative to start */
+	addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
+
+	while (start < stop - 1) {
+		const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+		/*
+		 * As addr_prel31 is relative to start an offset is needed to
+		 * make it relative to mid.
+		 */
+		if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+				mid->addr_offset)
+			stop = mid;
+		else {
+			/* keep addr_prel31 relative to start */
+			addr_prel31 -= ((unsigned long)mid -
+					(unsigned long)start);
+			start = mid;
+		}
+	}
+
+	if (likely(start->addr_offset <= addr_prel31))
+		return start;
+	else {
+		pr_warning("unwind: Unknown symbol address %08lx\n", addr);
+		return NULL;
+	}
+}
+
+static const struct unwind_idx *unwind_find_origin(
+		const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+	pr_debug("%s(%p, %p)\n", __func__, start, stop);
+	while (start < stop) {
+		const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+		if (mid->addr_offset >= 0x40000000)
+			/* negative offset */
+			start = mid + 1;
+		else
+			/* positive offset */
+			stop = mid;
+	}
+	pr_debug("%s -> %p\n", __func__, stop);
+	return stop;
+}
+
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+{
+	const struct unwind_idx *idx = NULL;
+	unsigned long flags;
+
+	pr_debug("%s(%08lx)\n", __func__, addr);
+
+	if (core_kernel_text(addr)) {
+		if (unlikely(!__origin_unwind_idx))
+			__origin_unwind_idx =
+				unwind_find_origin(__start_unwind_idx,
+						__stop_unwind_idx);
+
+		/* main unwind table */
+		idx = search_index(addr, __start_unwind_idx,
+				   __origin_unwind_idx,
+				   __stop_unwind_idx);
+	} else {
+		/* module unwind tables */
+		struct unwind_table *table;
+
+		raw_spin_lock_irqsave(&unwind_lock, flags);
+		list_for_each_entry(table, &unwind_tables, list) {
+			if (addr >= table->begin_addr &&
+			    addr < table->end_addr) {
+				idx = search_index(addr, table->start,
+						   table->origin,
+						   table->stop);
+				/* Move-to-front to exploit common traces */
+				list_move(&table->list, &unwind_tables);
+				break;
+			}
+		}
+		raw_spin_unlock_irqrestore(&unwind_lock, flags);
+	}
+
+	pr_debug("%s: idx = %p\n", __func__, idx);
+	return idx;
+}
+
+static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
+{
+	unsigned long ret;
+
+	if (ctrl->entries <= 0) {
+		pr_warning("unwind: Corrupt unwind table\n");
+		return 0;
+	}
+
+	ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff;
+
+	if (ctrl->byte == 0) {
+		ctrl->insn++;
+		ctrl->entries--;
+		ctrl->byte = 3;
+	} else
+		ctrl->byte--;
+
+	return ret;
+}
+
+/*
+ * Execute the current unwind instruction.
+ */
+static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
+{
+	unsigned long insn = unwind_get_byte(ctrl);
+
+	pr_debug("%s: insn = %08lx\n", __func__, insn);
+
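+	/*
+	 * Summary of the ARM EHABI unwind opcodes handled below:
+	 *   00xxxxxx           vsp += (x << 2) + 4
+	 *   01xxxxxx           vsp -= (x << 2) + 4
+	 *   1000iiii iiiiiiii  pop {r4-r15} per the 12-bit mask
+	 *   1001nnnn           vsp = r[n]
+	 *   10100nnn/10101nnn  pop r4-r[4+n], the latter also pops r14
+	 *   10110000           finish (pc defaults to lr)
+	 *   10110001 0000iiii  pop {r0-r3} per the 4-bit mask
+	 *   10110010 uleb128   vsp += 0x204 + (uleb128 << 2)
+	 */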
+	if ((insn & 0xc0) == 0x00)
+		ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
+	else if ((insn & 0xc0) == 0x40)
+		ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
+	else if ((insn & 0xf0) == 0x80) {
+		unsigned long mask;
+		unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
+		int load_sp, reg = 4;
+
+		insn = (insn << 8) | unwind_get_byte(ctrl);
+		mask = insn & 0x0fff;
+		if (mask == 0) {
+			pr_warning("unwind: 'Refuse to unwind' instruction %04lx\n",
+				   insn);
+			return -URC_FAILURE;
+		}
+
+		/* pop R4-R15 according to mask */
+		load_sp = mask & (1 << (13 - 4));
+		while (mask) {
+			if (mask & 1)
+				ctrl->vrs[reg] = *vsp++;
+			mask >>= 1;
+			reg++;
+		}
+		if (!load_sp)
+			ctrl->vrs[SP] = (unsigned long)vsp;
+	} else if ((insn & 0xf0) == 0x90 &&
+		   (insn & 0x0d) != 0x0d)
+		ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
+	else if ((insn & 0xf0) == 0xa0) {
+		unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
+		int reg;
+
+		/* pop R4-R[4+bbb] */
+		for (reg = 4; reg <= 4 + (insn & 7); reg++)
+			ctrl->vrs[reg] = *vsp++;
+		if (insn & 0x80)
+			ctrl->vrs[14] = *vsp++;
+		ctrl->vrs[SP] = (unsigned long)vsp;
+	} else if (insn == 0xb0) {
+		if (ctrl->vrs[PC] == 0)
+			ctrl->vrs[PC] = ctrl->vrs[LR];
+		/* no further processing */
+		ctrl->entries = 0;
+	} else if (insn == 0xb1) {
+		unsigned long mask = unwind_get_byte(ctrl);
+		unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
+		int reg = 0;
+
+		if (mask == 0 || mask & 0xf0) {
+			pr_warning("unwind: Spare encoding %04lx\n",
+			       (insn << 8) | mask);
+			return -URC_FAILURE;
+		}
+
+		/* pop R0-R3 according to mask */
+		while (mask) {
+			if (mask & 1)
+				ctrl->vrs[reg] = *vsp++;
+			mask >>= 1;
+			reg++;
+		}
+		ctrl->vrs[SP] = (unsigned long)vsp;
+	} else if (insn == 0xb2) {
+		unsigned long uleb128 = unwind_get_byte(ctrl);
+
+		ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
+	} else {
+		pr_warning("unwind: Unhandled instruction %02lx\n", insn);
+		return -URC_FAILURE;
+	}
+
+	pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
+		 ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);
+
+	return URC_OK;
+}
+
+/*
+ * Unwind a single frame starting with frame->sp for the symbol at
+ * frame->pc, updating the frame with the unwound register values.
+ */
+int unwind_frame(struct stackframe *frame)
+{
+	unsigned long high, low;
+	const struct unwind_idx *idx;
+	struct unwind_ctrl_block ctrl;
+
+	/* only go to a higher address on the stack */
+	low = frame->sp;
+	high = ALIGN(low, THREAD_SIZE);
+
+	pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
+		 frame->pc, frame->lr, frame->sp);
+
+	if (!kernel_text_address(frame->pc))
+		return -URC_FAILURE;
+
+	idx = unwind_find_idx(frame->pc);
+	if (!idx) {
+		/* pr_warning("unwind: Index not found %08lx\n", frame->pc); */
+		return -URC_FAILURE;
+	}
+
+	ctrl.vrs[FP] = frame->fp;
+	ctrl.vrs[SP] = frame->sp;
+	ctrl.vrs[LR] = frame->lr;
+	ctrl.vrs[PC] = 0;
+
+	if (idx->insn == 1)
+		/* can't unwind */
+		return -URC_FAILURE;
+	else if ((idx->insn & 0x80000000) == 0)
+		/* prel31 to the unwind table */
+		ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
+	else if ((idx->insn & 0xff000000) == 0x80000000)
+		/* only personality routine 0 supported in the index */
+		ctrl.insn = &idx->insn;
+	else {
+		pr_warning("unwind: Unsupported personality routine %08lx in the index at %p\n",
+			   idx->insn, idx);
+		return -URC_FAILURE;
+	}
+
+	/* check the personality routine */
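+	/*
+	 * Compact model: an 0x80 byte means up to three unwind opcodes
+	 * follow in this word (personality routine 0); 0x81 adds a count
+	 * of additional opcode words (personality routine 1).
+	 */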
+	if ((*ctrl.insn & 0xff000000) == 0x80000000) {
+		ctrl.byte = 2;
+		ctrl.entries = 1;
+	} else if ((*ctrl.insn & 0xff000000) == 0x81000000) {
+		ctrl.byte = 1;
+		ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
+	} else {
+		pr_warning("unwind: Unsupported personality routine %08lx at %p\n",
+			   *ctrl.insn, ctrl.insn);
+		return -URC_FAILURE;
+	}
+
+	while (ctrl.entries > 0) {
+		int urc = unwind_exec_insn(&ctrl);
+		if (urc < 0)
+			return urc;
+		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+			return -URC_FAILURE;
+	}
+
+	if (ctrl.vrs[PC] == 0)
+		ctrl.vrs[PC] = ctrl.vrs[LR];
+
+	/* check for infinite loop */
+	if (frame->pc == ctrl.vrs[PC])
+		return -URC_FAILURE;
+
+	frame->fp = ctrl.vrs[FP];
+	frame->sp = ctrl.vrs[SP];
+	frame->lr = ctrl.vrs[LR];
+	frame->pc = ctrl.vrs[PC];
+
+	return URC_OK;
+}
+
+void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+{
+	struct stackframe frame;
+	register unsigned long current_sp asm ("sp");
+
+	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+	if (!tsk)
+		tsk = current;
+
+	if (regs) {
+		frame.fp = regs->ARM_fp;
+		frame.sp = regs->ARM_sp;
+		frame.lr = regs->ARM_lr;
+		/* PC might be corrupted, use LR in that case. */
+		frame.pc = kernel_text_address(regs->ARM_pc)
+			 ? regs->ARM_pc : regs->ARM_lr;
+	} else if (tsk == current) {
+		frame.fp = (unsigned long)__builtin_frame_address(0);
+		frame.sp = current_sp;
+		frame.lr = (unsigned long)__builtin_return_address(0);
+		frame.pc = (unsigned long)unwind_backtrace;
+	} else {
+		/* task blocked in __switch_to */
+		frame.fp = thread_saved_fp(tsk);
+		frame.sp = thread_saved_sp(tsk);
+		/*
+		 * The function calling __switch_to cannot be a leaf function
+		 * so LR is recovered from the stack.
+		 */
+		frame.lr = 0;
+		frame.pc = thread_saved_pc(tsk);
+	}
+
+	while (1) {
+		int urc;
+		unsigned long where = frame.pc;
+
+		urc = unwind_frame(&frame);
+		if (urc < 0)
+			break;
+		dump_backtrace_entry(where, frame.pc, frame.sp - 4);
+	}
+}
+
+struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
+				      unsigned long text_addr,
+				      unsigned long text_size)
+{
+	unsigned long flags;
+	struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
+
+	pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
+		 text_addr, text_size);
+
+	if (!tab)
+		return tab;
+
+	tab->start = (const struct unwind_idx *)start;
+	tab->stop = (const struct unwind_idx *)(start + size);
+	tab->origin = unwind_find_origin(tab->start, tab->stop);
+	tab->begin_addr = text_addr;
+	tab->end_addr = text_addr + text_size;
+
+	raw_spin_lock_irqsave(&unwind_lock, flags);
+	list_add_tail(&tab->list, &unwind_tables);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+	return tab;
+}
+
+void unwind_table_del(struct unwind_table *tab)
+{
+	unsigned long flags;
+
+	if (!tab)
+		return;
+
+	raw_spin_lock_irqsave(&unwind_lock, flags);
+	list_del(&tab->list);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+	kfree(tab);
+}
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/vmlinux.lds.S b/ap/os/linux/linux-3.4.x/arch/arm/kernel/vmlinux.lds.S
new file mode 100755
index 0000000..43e5744
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/vmlinux.lds.S
@@ -0,0 +1,312 @@
+/* ld script to make ARM Linux kernel
+ * taken from the i386 version by Russell King
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+#define PROC_INFO							\
+	. = ALIGN(4);							\
+	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
+	KEEP(*(.proc.info.init))					\
+	VMLINUX_SYMBOL(__proc_info_end) = .;
+
+#define IDMAP_TEXT							\
+	ALIGN_FUNCTION();						\
+	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
+	KEEP(*(.idmap.text))						\
+	VMLINUX_SYMBOL(__idmap_text_end) = .;
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x)		x
+#else
+#define ARM_CPU_DISCARD(x)	x
+#define ARM_CPU_KEEP(x)
+#endif
+
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+	defined(CONFIG_GENERIC_BUG)
+#define ARM_EXIT_KEEP(x)	x
+#define ARM_EXIT_DISCARD(x)
+#else
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x)	x
+#endif
+
+OUTPUT_ARCH(arm)
+ENTRY(stext)
+
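+/*
+ * jiffies aliases the least significant word of jiffies_64: offset 0
+ * on little-endian, offset 4 on big-endian kernels.
+ */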
+#ifndef __ARMEB__
+jiffies = jiffies_64;
+#else
+jiffies = jiffies_64 + 4;
+#endif
+
+SECTIONS
+{
+	/*
+	 * XXX: The linker does not define how output sections are
+	 * assigned to input sections when there are multiple statements
+	 * matching the same input section name.  There is no documented
+	 * order of matching.
+	 *
+	 * unwind exit sections must be discarded before the rest of the
+	 * unwind sections get included.
+	 */
+	/DISCARD/ : {
+		*(.ARM.exidx.exit.text)
+		*(.ARM.extab.exit.text)
+		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
+		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
+		ARM_EXIT_DISCARD(EXIT_TEXT)
+		ARM_EXIT_DISCARD(EXIT_DATA)
+		EXIT_CALL
+#ifndef CONFIG_HOTPLUG
+		*(.ARM.exidx.devexit.text)
+		*(.ARM.extab.devexit.text)
+#endif
+#ifndef CONFIG_MMU
+		*(.fixup)
+		*(__ex_table)
+#endif
+#ifndef CONFIG_SMP_ON_UP
+		*(.alt.smp.init)
+#endif
+		*(.discard)
+		*(.discard.*)
+	}
+
+#ifdef CONFIG_XIP_KERNEL
+	. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
+#else
+	. = PAGE_OFFSET + TEXT_OFFSET;
+#endif
+	.head.text : {
+		_text = .;
+		HEAD_TEXT
+	}
+	.text : {			/* Real text segment		*/
+		_stext = .;		/* Text and read-only data	*/
+			__exception_text_start = .;
+			KEEP(*(.exception.text))
+			__exception_text_end = .;
+			IRQENTRY_TEXT
+			TEXT_TEXT
+			SCHED_TEXT
+			LOCK_TEXT
+			KPROBES_TEXT
+			IDMAP_TEXT
+#ifdef CONFIG_MMU
+			KEEP(*(.fixup))
+#endif
+			KEEP(*(.gnu.warning))
+			KEEP(*(.glue_7))
+			KEEP(*(.glue_7t))
+			. = ALIGN(4);
+			KEEP(*(.got))			/* Global offset table		*/
+			ARM_CPU_KEEP(PROC_INFO)
+	}
+
+	RO_DATA(PAGE_SIZE)
+
+#ifdef CONFIG_ARM_UNWIND
+	/*
+	 * Stack unwinding tables
+	 */
+	. = ALIGN(8);
+	.ARM.unwind_idx : {
+		__start_unwind_idx = .;
+		*(.ARM.exidx*)
+		__stop_unwind_idx = .;
+	}
+	.ARM.unwind_tab : {
+		__start_unwind_tab = .;
+		*(.ARM.extab*)
+		__stop_unwind_tab = .;
+	}
+#endif
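+
+	/*
+	 * The __start/__stop symbols above delimit the built-in unwind
+	 * index and table consumed by unwind.c; modules register their
+	 * own ranges separately via unwind_table_add().
+	 */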
+
+	_etext = .;			/* End of text and rodata section */
+
+#ifndef CONFIG_XIP_KERNEL
+	. = ALIGN(PAGE_SIZE);
+	__init_begin = .;
+#endif
+
+	INIT_TEXT_SECTION(8)
+	.exit.text : {
+		ARM_EXIT_KEEP(EXIT_TEXT)
+	}
+	.init.proc.info : {
+		ARM_CPU_DISCARD(PROC_INFO)
+	}
+	.init.arch.info : {
+		__arch_info_begin = .;
+		KEEP(*(.arch.info.init))
+		__arch_info_end = .;
+	}
+	.init.tagtable : {
+		__tagtable_begin = .;
+		KEEP(*(.taglist.init))
+		__tagtable_end = .;
+	}
+#ifdef CONFIG_SMP_ON_UP
+	.init.smpalt : {
+		__smpalt_begin = .;
+		*(.alt.smp.init)
+		__smpalt_end = .;
+	}
+#endif
+	.init.pv_table : {
+		__pv_table_begin = .;
+		*(.pv_table)
+		__pv_table_end = .;
+	}
+	.init.data : {
+#ifndef CONFIG_XIP_KERNEL
+		INIT_DATA
+#endif
+		INIT_SETUP(16)
+		INIT_CALLS
+		CON_INITCALL
+		SECURITY_INITCALL
+		INIT_RAM_FS
+	}
+#ifndef CONFIG_XIP_KERNEL
+	.exit.data : {
+		ARM_EXIT_KEEP(EXIT_DATA)
+	}
+#endif
+
+	PERCPU_SECTION(L1_CACHE_BYTES)
+
+#ifdef CONFIG_XIP_KERNEL
+	__data_loc = ALIGN(4);		/* location in binary */
+	. = PAGE_OFFSET + TEXT_OFFSET;
+#else
+	__init_end = .;
+	. = ALIGN(THREAD_SIZE);
+	__data_loc = .;
+#endif
+
+	.data : AT(__data_loc) {
+		_data = .;		/* address in memory */
+		_sdata = .;
+
+		/*
+		 * First, the init task union, aligned to an
+		 * 8192-byte (THREAD_SIZE) boundary.
+		 */
+		INIT_TASK_DATA(THREAD_SIZE)
+
+#ifdef CONFIG_XIP_KERNEL
+		. = ALIGN(PAGE_SIZE);
+		__init_begin = .;
+		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
+		. = ALIGN(PAGE_SIZE);
+		__init_end = .;
+#endif
+
+		NOSAVE_DATA
+		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
+		READ_MOSTLY_DATA(L1_CACHE_BYTES)
+
+		/*
+		 * The exception fixup table (might need resorting at runtime)
+		 */
+		. = ALIGN(4);
+		__start___ex_table = .;
+#ifdef CONFIG_MMU
+		*(__ex_table)
+#endif
+		__stop___ex_table = .;
+
+		/*
+		 * and the usual data section
+		 */
+		DATA_DATA
+		CONSTRUCTORS
+
+		_edata = .;
+	}
+	_edata_loc = __data_loc + SIZEOF(.data);
+
+#ifdef CONFIG_HAVE_TCM
+	/*
+	 * We align everything to a page boundary so we can
+	 * free it after init has commenced and the TCM contents have
+	 * been copied to their destination.
+	 */
+	.tcm_start : {
+		. = ALIGN(PAGE_SIZE);
+		__tcm_start = .;
+		__itcm_start = .;
+	}
+
+	/*
+	 * Link these to the ITCM RAM.
+	 * Put the VMA at the TCM address and the LMA in common RAM;
+	 * the contents are then uploaded from RAM to TCM and the used
+	 * RAM is freed afterwards.
+	 */
+	.text_itcm ITCM_OFFSET : AT(__itcm_start)
+	{
+		__sitcm_text = .;
+		*(.tcm.text)
+		*(.tcm.rodata)
+		. = ALIGN(4);
+		__eitcm_text = .;
+	}
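+
+	/*
+	 * Illustrative only: code/data normally end up in the .tcm.*
+	 * input sections via the helpers that asm/tcm.h is assumed to
+	 * provide, e.g.
+	 *
+	 *	int __tcmfunc fiq_handler(void);    -> .tcm.text
+	 *	static u32 __tcmdata fiq_buffer[8]; -> .tcm.data
+	 */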
+
+	/*
+	 * Reset the dot pointer; this is needed to create the
+	 * relative __dtcm_start below (used as an extern in code).
+	 */
+	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
+
+	.dtcm_start : {
+		__dtcm_start = .;
+	}
+
+	/* TODO: add remainder of ITCM as well, that can be used for data! */
+	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
+	{
+		. = ALIGN(4);
+		__sdtcm_data = .;
+		*(.tcm.data)
+		. = ALIGN(4);
+		__edtcm_data = .;
+	}
+
+	/* Reset the dot pointer or the linker gets confused */
+	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
+
+	/* End marker for freeing TCM copy in linked object */
+	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
+		. = ALIGN(PAGE_SIZE);
+		__tcm_end = .;
+	}
+#endif
+
+	NOTES
+
+	BSS_SECTION(0, 0, 0)
+	_end = .;
+
+	STABS_DEBUG
+	.comment 0 : { *(.comment) }
+}
+
+/*
+ * These must never be empty.
+ * If you have to comment these two assert statements out, your
+ * binutils is too old (for other reasons as well).
+ */
+ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
+ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
diff --git a/ap/os/linux/linux-3.4.x/arch/arm/kernel/xscale-cp0.c b/ap/os/linux/linux-3.4.x/arch/arm/kernel/xscale-cp0.c
new file mode 100644
index 0000000..e42adc6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/arch/arm/kernel/xscale-cp0.c
@@ -0,0 +1,178 @@
+/*
+ * linux/arch/arm/kernel/xscale-cp0.c
+ *
+ * XScale DSP and iWMMXt coprocessor context switching and handling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/thread_notify.h>
+
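+/*
+ * Save/restore the XScale DSP accumulator acc0, transferred as two
+ * 32-bit words through coprocessor 0 with mrrc/mcrr.
+ */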
+static inline void dsp_save_state(u32 *state)
+{
+	__asm__ __volatile__ (
+		"mrrc	p0, 0, %0, %1, c0\n"
+		: "=r" (state[0]), "=r" (state[1]));
+}
+
+static inline void dsp_load_state(u32 *state)
+{
+	__asm__ __volatile__ (
+		"mcrr	p0, 0, %0, %1, c0\n"
+		: : "r" (state[0]), "r" (state[1]));
+}
+
+static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		thread->cpu_context.extra[0] = 0;
+		thread->cpu_context.extra[1] = 0;
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		dsp_save_state(current_thread_info()->cpu_context.extra);
+		dsp_load_state(thread->cpu_context.extra);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block dsp_notifier_block = {
+	.notifier_call	= dsp_do,
+};
+
+
+#ifdef CONFIG_IWMMXT
+static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = t;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		/*
+		 * flush_thread() zeroes thread->fpstate, so no need
+		 * to do anything here.
+		 *
+		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
+		 * initialised state information on the first fault.
+		 */
+
+	case THREAD_NOTIFY_EXIT:
+		iwmmxt_task_release(thread);
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		iwmmxt_task_switch(thread);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iwmmxt_notifier_block = {
+	.notifier_call	= iwmmxt_do,
+};
+#endif
+
+
+static u32 __init xscale_cp_access_read(void)
+{
+	u32 value;
+
+	__asm__ __volatile__ (
+		"mrc	p15, 0, %0, c15, c1, 0\n\t"
+		: "=r" (value));
+
+	return value;
+}
+
+static void __init xscale_cp_access_write(u32 value)
+{
+	u32 temp;
+
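+	/*
+	 * The read-back followed by "mov %0, %0; sub pc, pc, #4" is the
+	 * XScale CPWAIT idiom: it stalls until the coprocessor write has
+	 * taken effect before any subsequent instruction depends on it.
+	 */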
+	__asm__ __volatile__ (
+		"mcr	p15, 0, %1, c15, c1, 0\n\t"
+		"mrc	p15, 0, %0, c15, c1, 0\n\t"
+		"mov	%0, %0\n\t"
+		"sub	pc, pc, #4\n\t"
+		: "=r" (temp) : "r" (value));
+}
+
+/*
+ * Detect whether we have a MAC coprocessor (40-bit register) or an
+ * iWMMXt coprocessor (64-bit registers) by loading 00000100:00000000
+ * into a coprocessor register, reading it back, and checking
+ * whether the upper word survived intact.
+ */
+static int __init cpu_has_iwmmxt(void)
+{
+	u32 lo;
+	u32 hi;
+
+	/*
+	 * This sequence is interpreted by the DSP coprocessor as:
+	 *	mar	acc0, %2, %3
+	 *	mra	%0, %1, acc0
+	 *
+	 * And by the iWMMXt coprocessor as:
+	 *	tmcrr	wR0, %2, %3
+	 *	tmrrc	%0, %1, wR0
+	 */
+	__asm__ __volatile__ (
+		"mcrr	p0, 0, %2, %3, c0\n"
+		"mrrc	p0, 0, %0, %1, c0\n"
+		: "=r" (lo), "=r" (hi)
+		: "r" (0), "r" (0x100));
+
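+	/*
+	 * Bit 40 of the value (0x100 in the high word) does not fit in
+	 * the 40-bit DSP accumulator, so it reads back as zero there;
+	 * a 64-bit iWMMXt register preserves it.
+	 */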
+	return !!hi;
+}
+
+
+/*
+ * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we
+ * disable CP0/CP1 access on boot and let call_fpe() and the iWMMXt
+ * lazy switch code handle iWMMXt context switching.  If, on the
+ * other hand, the CPU has a DSP coprocessor, we keep access to CP0
+ * enabled all the time and save/restore acc0 on context switch in a
+ * non-lazy fashion.
+ */
+static int __init xscale_cp0_init(void)
+{
+	u32 cp_access;
+
+	cp_access = xscale_cp_access_read() & ~3;
+	xscale_cp_access_write(cp_access | 1);
+
+	if (cpu_has_iwmmxt()) {
+#ifndef CONFIG_IWMMXT
+		printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor "
+			"detected, but kernel support is missing.\n");
+#else
+		printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n");
+		elf_hwcap |= HWCAP_IWMMXT;
+		thread_register_notifier(&iwmmxt_notifier_block);
+#endif
+	} else {
+		printk(KERN_INFO "XScale DSP coprocessor detected.\n");
+		thread_register_notifier(&dsp_notifier_block);
+		cp_access |= 1;
+	}
+
+	xscale_cp_access_write(cp_access);
+
+	return 0;
+}
+
+late_initcall(xscale_cp0_init);