blob: 3d60e06cfc6d988080c4959c9fbf13f4a3cc1679 [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (c) 2009 Corey Tabaka
3 * Copyright (c) 2014 Intel Corporation
4 * Copyright (c) 2014 Travis Geiselbrecht
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files
8 * (the "Software"), to deal in the Software without restriction,
9 * including without limitation the rights to use, copy, modify, merge,
10 * publish, distribute, sublicense, and/or sell copies of the Software,
11 * and to permit persons to whom the Software is furnished to do so,
12 * subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be
15 * included in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
21 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25#include <sys/types.h>
26#include <string.h>
27#include <stdlib.h>
28#include <debug.h>
29#include <kernel/thread.h>
30#include <kernel/spinlock.h>
31#include <arch/x86.h>
32#include <arch/x86/descriptor.h>
33#include <arch/fpu.h>
34
/* we're uniprocessor at this point for x86-64, so store a global pointer to the current thread */
/* NOTE(review): a single global (not per-cpu) — revisit if SMP support lands for x86-64 */
struct thread *_current_thread;
37
38static void initial_thread_func(void) __NO_RETURN;
39static void initial_thread_func(void)
40{
41 int ret;
42
43 /* release the thread lock that was implicitly held across the reschedule */
44 spin_unlock(&thread_lock);
45 arch_enable_ints();
46
47 ret = _current_thread->entry(_current_thread->arg);
48
49 thread_exit(ret);
50}
51
52void arch_thread_initialize(thread_t *t)
53{
54 /* create a default stack frame on the stack */
55 vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;
56
57 /* make sure the top of the stack is 8 byte aligned
58 for EABI compliance */
59
60 stack_top = ROUNDDOWN(stack_top, 8);
61
62 struct x86_context_switch_frame *frame =
63 (struct x86_context_switch_frame *)(stack_top);
64 frame--;
65
66 /* fill it in */
67 memset(frame, 0, sizeof(*frame));
68
69 frame->rip = (vaddr_t) &initial_thread_func;
70 frame->rflags = 0x3002; /* IF = 0, NT = 0, IOPL = 3 */
71
72 /* set the stack pointer */
73 t->arch.rsp = (vaddr_t)frame;
74#if X86_WITH_FPU
75 fpu_init_thread_states(t);
76#endif
77}
78
79void arch_dump_thread(thread_t *t)
80{
81 if (t->state != THREAD_RUNNING) {
82 dprintf(INFO, "\tarch: ");
83 dprintf(INFO, "sp 0x%lx\n", t->arch.rsp);
84 }
85}
86
/*
 * Switch execution from oldthread to newthread.
 * Hands FPU state over first (when configured), then swaps stacks via the
 * assembly context-switch routine.
 */
void arch_context_switch(thread_t *oldthread, thread_t *newthread)
{
#if X86_WITH_FPU
    /* let the FPU code transfer per-thread FPU state between the two threads */
    fpu_context_switch(oldthread, newthread);
#endif

    /* asm routine (defined elsewhere): stores the outgoing rsp through the
       first argument and resumes execution from newthread's saved rsp */
    x86_64_context_switch(&oldthread->arch.rsp, newthread->arch.rsp);
}
94}