[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/bsp/lk/arch/x86-64/thread.c b/src/bsp/lk/arch/x86-64/thread.c
new file mode 100755
index 0000000..3d60e06
--- /dev/null
+++ b/src/bsp/lk/arch/x86-64/thread.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2009 Corey Tabaka
+ * Copyright (c) 2014 Intel Corporation
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <debug.h>
+#include <kernel/thread.h>
+#include <kernel/spinlock.h>
+#include <arch/x86.h>
+#include <arch/x86/descriptor.h>
+#include <arch/fpu.h>
+
+/* we're uniprocessor at this point for x86-64, so a single global pointer
+   to the current thread suffices (no per-CPU storage needed yet) */
+struct thread *_current_thread;
+
+static void initial_thread_func(void) __NO_RETURN; /* trampoline every new thread starts in; never returns */
+static void initial_thread_func(void)
+{
+    int ret;
+
+    /* release the thread lock that was implicitly held across the reschedule */
+    spin_unlock(&thread_lock);
+    arch_enable_ints(); /* interrupts stayed masked across the context switch until here */
+
+    ret = _current_thread->entry(_current_thread->arg); /* run the thread's real entry point */
+
+    thread_exit(ret); /* entry returned: retire the thread with its exit code; does not return */
+}
+
+/* Set up a fresh thread's initial stack so the first context switch "returns" into initial_thread_func. */
+void arch_thread_initialize(thread_t *t)
+{
+    /* create a default stack frame on the stack */
+    vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;
+
+    /* make sure the top of the stack is 8 byte aligned (NOTE(review): "EABI"
+       is ARM wording; x86-64 SysV calls expect 16-byte alignment -- confirm) */
+
+    stack_top = ROUNDDOWN(stack_top, 8);
+
+    struct x86_context_switch_frame *frame =
+        (struct x86_context_switch_frame *)(stack_top);
+    frame--; /* carve out room for one switch frame just below the stack top */
+
+    /* fill it in: zero all saved registers by default */
+    memset(frame, 0, sizeof(*frame));
+
+    frame->rip = (vaddr_t) &initial_thread_func; /* first switch resumes at the trampoline */
+    frame->rflags = 0x3002; /* IF = 0, NT = 0, IOPL = 3 */
+
+    /* set the stack pointer so the context switch unwinds this frame */
+    t->arch.rsp = (vaddr_t)frame;
+#if X86_WITH_FPU
+    fpu_init_thread_states(t); /* give the thread its own initial FPU state */
+#endif
+}
+
+/* Print the arch-specific state (saved rsp) of a thread for debugging. */
+void arch_dump_thread(thread_t *t)
+{
+    if (t->state != THREAD_RUNNING) { /* a running thread's rsp lives in the CPU, not in t->arch */
+        dprintf(INFO, "\tarch: ");
+        dprintf(INFO, "sp 0x%lx\n", t->arch.rsp);
+    }
+}
+
+/* Switch from oldthread to newthread: hand over FPU state first, then swap stacks. */
+void arch_context_switch(thread_t *oldthread, thread_t *newthread)
+{
+#if X86_WITH_FPU
+    fpu_context_switch(oldthread, newthread); /* FPU state handled before the stack switch */
+#endif
+
+    /* asm helper: saves the current rsp into oldthread and resumes on newthread's stack */
+    x86_64_context_switch(&oldthread->arch.rsp, newthread->arch.rsp);
+}