[Feature] add MT2731_MP2_MR2_SVN388 baseline version
Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/bsp/lk/arch/arm64/asm.S b/src/bsp/lk/arch/arm64/asm.S
new file mode 100644
index 0000000..3adc0ee
--- /dev/null
+++ b/src/bsp/lk/arch/arm64/asm.S
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <asm.h>
+#include <arch/asm_macros.h>
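+
+/*
+ * Note: the push/pop mnemonics below are not native AArch64
+ * instructions; they are presumably pair store/load macros (stp/ldp
+ * with writeback) provided by arch/asm_macros.h.
+ */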
+
+/* x9-x15 are caller-saved per AAPCS64, so they are free scratch registers here */
+tmp .req x9
+
+/* void arm64_context_switch(vaddr_t *old_sp, vaddr_t new_sp); */
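+/*
+ * Per AAPCS64, only the callee-saved registers x19-x28, the frame
+ * pointer x29, and the link register x30 must survive this call; x18
+ * (the platform register) is preserved here as well. Everything else
+ * is caller-saved and left to the compiler at the call site.
+ */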
+FUNCTION(arm64_context_switch)
+ /* save old frame */
+ push x28, x29
+ push x26, x27
+ push x24, x25
+ push x22, x23
+ push x20, x21
+ push x18, x19
+ str x30, [sp,#-16]! /* lr gets a full 16-byte slot to keep sp aligned */
+
+ /* save old sp */
+ mov x15, sp
+ str x15, [x0]
+
+ /* load new sp */
+ mov sp, x1
+
+ /* restore new frame */
+ ldr x30, [sp], #16
+ pop x18, x19
+ pop x20, x21
+ pop x22, x23
+ pop x24, x25
+ pop x26, x27
+ pop x28, x29
+
+ ret
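+
+/*
+ * A minimal sketch of how a new thread's stack could be seeded so the
+ * first switch into it "returns" to its entry point (names hypothetical;
+ * the layout follows the frame saved above: 14 eight-byte slots, with
+ * x30 restored from the lowest one):
+ *
+ *   uint64_t *frame = (uint64_t *)(stack_top - 14 * sizeof(uint64_t));
+ *   frame[0] = (uint64_t)thread_trampoline;  // loaded into x30
+ *   // frame[2..13] become x18..x29; zero-fill is fine
+ *   thread->sp = (vaddr_t)frame;             // passed as new_sp later
+ */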
+
+FUNCTION(arm64_chain_load)
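+ /*
+  * Assumed C prototype, inferred from the register shuffle below
+  * (the exact spelling in the headers may differ):
+  *   void arm64_chain_load(paddr_t entry, ulong arg0, ulong arg1,
+  *                         ulong arg2, ulong arg3) __NO_RETURN;
+  */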
+ /* shuffle the args around */
+ mov x5, x0
+ mov x0, x1
+ mov x1, x2
+ mov x2, x3
+ mov x3, x4
+ mov x4, x5
+
+#if WITH_KERNEL_VM
+ /* disable MMU */
+ mrs x5, sctlr_el1
+ bic x5, x5, #0x1
+ msr sctlr_el1, x5
+ isb
+#endif
+
+ /* leave a clean TLB for the next image */
+ tlbi vmalle1
+ br x4
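+
+/*
+ * Hypothetical call site (a sketch only; names are not from this tree):
+ *
+ *   arm64_chain_load(next_kernel_paddr, (ulong)dtb_paddr, 0, 0, 0);
+ *   // never returns; the next image starts at next_kernel_paddr with
+ *   // x0 = dtb_paddr and the MMU disabled (when WITH_KERNEL_VM)
+ */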
+
+FUNCTION(arm64_elX_to_el1)
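+ /*
+  * Demote the CPU to EL1h from whichever exception level it is running
+  * at (EL3 or EL2). Each path points ELR_ELx at .Ltarget, programs
+  * SPSR_ELx for EL1h with DAIF masked, configures the EL1 execution
+  * state, and erets.
+  */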
+ mrs tmp, CurrentEL
+
+ /* CurrentEL lives in bits [3:2]; 0b01 == EL1 */
+ cmp tmp, #(0b01 << 2)
+ bne .notEL1
+ /* Already in EL1 */
+ ret
+
+.notEL1:
+ cmp tmp, #(0b10 << 2) /* 0b10 == EL2; anything else here is EL3 */
+ beq .inEL2
+
+ /* in EL3: set SCR_EL3.RW (bit 10) so EL2 executes in AArch64 */
+ mrs tmp, scr_el3
+ orr tmp, tmp, #(1<<10)
+ msr scr_el3, tmp
+
+ /* resume at .Ltarget (below) once we eret out of EL3 */
+ adr tmp, .Ltarget
+ msr elr_el3, tmp
+
+ mov tmp, #((0b1111 << 6) | (0b0101)) /* DAIF masked, EL1h (use SP_EL1) */
+ msr spsr_el3, tmp
+ b .confEL1
+
+.inEL2:
+ /* resume at .Ltarget (below) once we eret out of EL2 */
+ adr tmp, .Ltarget
+ msr elr_el2, tmp
+ mov tmp, #((0b1111 << 6) | (0b0101)) /* DAIF masked, EL1h (use SP_EL1) */
+ msr spsr_el2, tmp
+
+.confEL1:
+ /* disable EL2 coprocessor traps: RES1 bits set, TFP (bit 10) clear */
+ mov tmp, #0x33ff
+ msr cptr_el2, tmp
+
+ /* set EL1 to 64bit */
+ mov tmp, #(1<<31) /* HCR_EL2.RW: EL1 is AArch64 */
+ msr hcr_el2, tmp
+
+ /* disable EL1 FPU traps */
+ mov tmp, #(0b11<<20) /* CPACR_EL1.FPEN = 0b11: no FP/SIMD traps */
+ msr cpacr_el1, tmp
+
+ /* hand the current stack pointer to EL1 (EL1h runs on SP_EL1) */
+ mov tmp, sp
+ msr sp_el1, tmp
+
+ isb
+ eret /* drop to EL1h at .Ltarget */
+
+.Ltarget:
+ ret