/*
 * Copyright (c) 2008 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <debug.h>
#include <trace.h>
#include <kernel/thread.h>
#include <arch/arm64.h>

#define LOCAL_TRACE 0
32
/*
 * Register area built on a thread's stack for a context switch: the
 * link register plus the arm64 callee-saved registers x18-x29.
 *
 * NOTE(review): the field order must match the save/restore order in
 * the arm64_context_switch() assembly routine -- keep the two in sync.
 */
struct context_switch_frame {
    vaddr_t lr;         /* x30: address the restored thread resumes at */
    vaddr_t r18;
    vaddr_t r19;
    vaddr_t r20;
    vaddr_t r21;
    vaddr_t r22;
    vaddr_t r23;
    vaddr_t r24;
    vaddr_t r25;
    vaddr_t r26;
    vaddr_t r27;
    vaddr_t r28;
    vaddr_t r29;
    vaddr_t padding;    /* rounds the frame to a multiple of 16 bytes so
                           the stack pointer stays 16-byte aligned */
};
49
extern void arm64_context_switch(addr_t *old_sp, addr_t new_sp);
51
static void initial_thread_func(void) __NO_RETURN;

/*
 * Trampoline every new thread runs the first time it is switched to:
 * arch_thread_initialize() stores this function's address in the lr
 * slot of the thread's initial context switch frame, so the first
 * arm64_context_switch() into the thread "returns" here.  Never
 * returns -- ends by calling thread_exit().
 */
static void initial_thread_func(void)
{
    int ret;

    thread_t *current_thread = get_current_thread();

    LTRACEF("initial_thread_func: thread %p calling %p with arg %p\n", current_thread, current_thread->entry, current_thread->arg);

    /* release the thread lock that was implicitly held across the reschedule */
    spin_unlock(&thread_lock);
    arch_enable_ints();

    /* run the thread's real entry point with its argument */
    ret = current_thread->entry(current_thread->arg);

    LTRACEF("initial_thread_func: thread %p exiting with %d\n", current_thread, ret);

    /* tear the thread down, passing along its exit code */
    thread_exit(ret);
}
71
72void arch_thread_initialize(thread_t *t)
73{
74 // create a default stack frame on the stack
75 vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;
76
77 // make sure the top of the stack is 16 byte aligned for EABI compliance
78 stack_top = ROUNDDOWN(stack_top, 16);
79
80 struct context_switch_frame *frame = (struct context_switch_frame *)(stack_top);
81 frame--;
82
83 // fill it in
84 memset(frame, 0, sizeof(*frame));
85 frame->lr = (vaddr_t)&initial_thread_func;
86
87 // set the stack pointer
88 t->arch.sp = (vaddr_t)frame;
89}
90
/*
 * Perform the arm64 context switch from oldthread to newthread.
 * NOTE(review): appears to be called by the scheduler with thread_lock
 * held (initial_thread_func releases it after the first switch) -- confirm.
 */
void arch_context_switch(thread_t *oldthread, thread_t *newthread)
{
    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
    /* let the fpu code deal with oldthread's state before we switch stacks */
    arm64_fpu_pre_context_switch(oldthread);
    /* save callee state through oldthread->arch.sp, resume from newthread's */
    arm64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
}
97
98void arch_dump_thread(thread_t *t)
99{
100 if (t->state != THREAD_RUNNING) {
101 dprintf(INFO, "\tarch: ");
102 dprintf(INFO, "sp 0x%lx\n", t->arch.sp);
103 }
104}