/*
 * Copyright (c) 2012 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <debug.h>
#include <trace.h>
#include <assert.h>
#include <kernel/thread.h>
#include <arch/arm.h>
#include <arch/arm/cm.h>

#define LOCAL_TRACE 0

/*
 * Software-saved register frame a switched-out thread keeps on top of its
 * stack. Field order must match the push/pop order used by the context
 * switch assembly below: r4-r11 (callee-saved), then lr.
 */
struct arm_cm_context_switch_frame {
    uint32_t r4;
    uint32_t r5;
    uint32_t r6;
    uint32_t r7;
    uint32_t r8;
    uint32_t r9;
    uint32_t r10;
    uint32_t r11;
    uint32_t lr; /* address the thread resumes at when switched back in */
};

/* since we're implicitly uniprocessor, store a pointer to the current thread here */
thread_t *_current_thread;

/*
 * First routine every new thread runs, entered via the lr slot primed by
 * arch_thread_initialize(). Releases the thread lock and reenables
 * interrupts (both held/disabled across the context switch that got us
 * here), runs the thread's entry routine, then exits the thread with the
 * entry routine's return value. Never returns.
 */
static void initial_thread_func(void) __NO_RETURN;
static void initial_thread_func(void)
{
    int ret;

    LTRACEF("thread %p calling %p with arg %p\n", _current_thread, _current_thread->entry, _current_thread->arg);
#if LOCAL_TRACE
    dump_thread(_current_thread);
#endif

    /* release the thread lock that was implicitly held across the reschedule */
    spin_unlock(&thread_lock);
    arch_enable_ints();

    ret = _current_thread->entry(_current_thread->arg);

    LTRACEF("thread %p exiting with %d\n", _current_thread, ret);

    thread_exit(ret);
}

71void arch_thread_initialize(struct thread *t)
72{
73 LTRACEF("thread %p, stack %p\n", t, t->stack);
74
75 /* find the top of the stack and align it on an 8 byte boundary */
76 uint32_t *sp = (void *)ROUNDDOWN((vaddr_t)t->stack + t->stack_size, 8);
77
78 struct arm_cm_context_switch_frame *frame = (void *)sp;
79 frame--;
80
81 /* arrange for lr to point to our starting routine */
82 frame->lr = (uint32_t)&initial_thread_func;
83
84 t->arch.sp = (addr_t)frame;
85 t->arch.was_preempted = false;
86}
87
/*
 * Non-NULL only while we are inside the pendsv handler: points at the
 * exception frame pushed for the thread being preempted.
 * arch_context_switch() tests it to tell a preemption apart from a
 * voluntary reschedule.
 */
volatile struct arm_cm_exception_frame_long *preempt_frame;

/*
 * C half of the pendsv exception: runs with the preempted thread's
 * register state already pushed on the stack by _pendsv (frame points at
 * it). Records the frame in preempt_frame and asks the scheduler to
 * preempt; if thread_preempt() returns, there was nothing to switch to
 * and we resume the interrupted thread.
 */
static void pendsv(struct arm_cm_exception_frame_long *frame)
{
    arch_disable_ints();

    LTRACEF("preempting thread %p (%s)\n", _current_thread, _current_thread->name);

    /* save the iframe the pendsv fired on and hit the preemption code */
    preempt_frame = frame;
    thread_preempt();

    LTRACEF("fell through\n");

    /* if we got here, there wasn't anything to switch to, so just fall through and exit */
    preempt_frame = NULL;

    arch_enable_ints();
}

108/*
109 * raw pendsv exception handler, triggered by interrupt glue to schedule
110 * a preemption check.
111 */
112__NAKED void _pendsv(void)
113{
114 __asm__ volatile(
115#if (__CORTEX_M >= 0x03)
116
117 "push { r4-r11, lr };"
118 "mov r0, sp;"
119 "bl %0;"
120 "pop { r4-r11, lr };"
121 "bx lr;"
122#else
123 "push { lr };"
124 "mov r0, r8;"
125 "mov r1, r9;"
126 "mov r2, r10;"
127 "mov r3, r11;"
128 "push { r0-r3 };"
129 "push { r4-r7 };"
130 "mov r0, sp;"
131 "bl %c0;"
132 "pop { r4-r7 };"
133 "pop { r0-r3 };"
134 "mov r8 , r0;"
135 "mov r9 , r1;"
136 "mov r10, r2;"
137 "mov r11, r3;"
138 "pop { r0 };"
139 "mov lr, r0;"
140 "bx lr;"
141#endif
142 :: "i" (pendsv)
143 );
144 __UNREACHABLE;
145}
/*
 * svc handler, used to hard switch the cpu into exception mode to return
 * to preempted thread.
 *
 * On entry r4 holds the target thread's saved stack pointer, loaded by
 * _half_save_and_svc() just before it issued the svc. Adopt that stack,
 * pop the software-saved half of the long exception frame, then branch
 * on the popped lr (the EXC_RETURN value saved at preemption time) to
 * perform the exception return into the preempted thread.
 */
__NAKED void _svc(void)
{
    __asm__ volatile(
        /* load the pointer to the original exception frame we want to restore */
#if (__CORTEX_M >= 0x03)
        "mov sp, r4;"
        "pop { r4-r11, lr };"
        "bx lr;"
#else
        /* armv6-m: restore low registers, bounce r8-r11 through r0-r3 */
        "mov sp, r4;"
        "pop { r4-r7 };"
        "pop { r0-r3 };"
        "mov r8 , r0;"
        "mov r9 , r1;"
        "mov r10, r2;"
        "mov r11, r3;"
        "pop { pc };"
#endif
    );
}

/*
 * Half of a context switch out of a non-preempted (yielding/blocking)
 * thread into a thread that was preempted. Saves the current thread's
 * callee-saved state on its own stack, publishes the new stack pointer
 * through *fromsp, moves onto the target stack, then issues an svc to
 * enter handler mode so _svc() can do a full exception return into the
 * preempted thread.
 */
__NAKED static void _half_save_and_svc(vaddr_t *fromsp, vaddr_t tosp)
{
    __asm__ volatile(
#if (__CORTEX_M >= 0x03)

        "push { r4-r11, lr };"
        "str sp, [r0];"   /* *fromsp = sp */

        /* make sure we load the destination sp here before we reenable interrupts */
        "mov sp, r1;"

        "clrex;"          /* drop any exclusive monitor state across the switch */
        "cpsie i;"

        "mov r4, r1;"     /* hand the target frame pointer to _svc in r4 */
        "svc #0;" /* make a svc call to get us into handler mode */

#else
        /* armv6-m: r8-r11 cannot be pushed directly, stage them through r2/r3 */
        "push { lr };"
        "mov r2, r10;"
        "mov r3, r11;"
        "push { r2-r3 };"
        "mov r2, r8;"
        "mov r3, r9;"
        "push { r2-r3 };"
        "push { r4-r7 };"

        "mov r3, sp;"
        "str r3, [r0];"   /* *fromsp = sp */
        "mov sp, r1;"
        "cpsie i;"

        "mov r4, r1;"     /* hand the target frame pointer to _svc in r4 */
        "svc #0;" /* make a svc call to get us into handler mode */
#endif
    );
}

/*
 * simple scenario where the to and from thread yielded: save the outgoing
 * thread's callee-saved frame on its stack, record the stack pointer in
 * *fromsp, adopt the incoming thread's stack (tosp), restore its frame
 * and return straight into it.
 */
__NAKED static void _arch_non_preempt_context_switch(vaddr_t *fromsp, vaddr_t tosp)
{
    __asm__ volatile(
#if (__CORTEX_M >= 0x03)
        "push { r4-r11, lr };"
        "str sp, [r0];"   /* *fromsp = sp */

        "mov sp, r1;"     /* adopt the target thread's stack */
        "pop { r4-r11, lr };"
        "clrex;"          /* drop any exclusive monitor state across the switch */
        "bx lr;"
#else
        /* armv6-m: r8-r11 cannot be pushed directly, stage them through r2/r3 */
        "push { lr };"
        "mov r2, r10;"
        "mov r3, r11;"
        "push { r2-r3 };"
        "mov r2, r8;"
        "mov r3, r9;"
        "push { r2-r3 };"
        "push { r4-r7 };"

        "mov r3, sp;"
        "str r3, [r0];"   /* *fromsp = sp */
        "mov sp, r1;"     /* adopt the target thread's stack */

        "pop { r4-r7 };"
        "pop { r0-r3 };"
        "mov r8 , r0;"
        "mov r9 , r1;"
        "mov r10, r2;"
        "mov r11, r3;"
        "pop { pc };"     /* return into the target thread */
#endif
    );
}

/*
 * Landing pad used when switching from handler mode into a thread that
 * was NOT preempted: arch_context_switch() builds a fake hardware
 * exception frame whose pc points here, so after the exception return we
 * pop the thread's software-saved context switch frame off its stack and
 * resume it.
 */
__NAKED static void _thread_mode_bounce(void)
{
    __asm__ volatile(
#if (__CORTEX_M >= 0x03)
        "pop { r4-r11, lr };"
        "bx lr;"
#else
        /* armv6-m: restore low registers, bounce r8-r11 through r0-r3 */
        "pop { r4-r7 };"
        "pop { r0-r3 };"
        "mov r8 , r0;"
        "mov r9 , r1;"
        "mov r10, r2;"
        "mov r11, r3;"
        "pop { pc };"
#endif
    );
    __UNREACHABLE;
}

/*
 * The raw context switch routine. Called by the scheduler when it decides to switch.
 * Called either in the context of a thread yielding or blocking (interrupts disabled,
 * on the system stack), or inside the pendsv handler on a thread that is being preempted
 * (interrupts disabled, in handler mode). If preempt_frame is set the thread
 * is being preempted.
 */
void arch_context_switch(struct thread *oldthread, struct thread *newthread)
{
    LTRACE_ENTRY;

    /* if preempt_frame is set, we are being preempted */
    if (preempt_frame) {
        oldthread->arch.was_preempted = true;
        oldthread->arch.sp = (addr_t)preempt_frame;
        preempt_frame = NULL;

        LTRACEF("we're preempted, new %d\n", newthread->arch.was_preempted);
        if (newthread->arch.was_preempted) {
            /* return directly to the preempted thread's iframe */
            __asm__ volatile(
                "mov sp, %0;"
#if (__CORTEX_M >= 0x03)
                "cpsie i;"
                "pop { r4-r11, lr };"
                "clrex;"
                "bx lr;"  /* lr holds the EXC_RETURN saved at preemption */
#else
                /* armv6-m: restore low registers, bounce r8-r11 through r0-r3 */
                "cpsie i;"
                "pop { r4-r7 };"
                "pop { r0-r3 };"
                "mov r8 , r0;"
                "mov r9 , r1;"
                "mov r10, r2;"
                "mov r11, r3;"
                "pop { pc };"
#endif
                :: "r"(newthread->arch.sp)
            );
            __UNREACHABLE;
        } else {
            /* we're inside a pendsv, switching to a user mode thread */
            /* set up a fake frame to exception return to */
            struct arm_cm_exception_frame_short *frame = (void *)newthread->arch.sp;
            frame--;

            frame->pc = (uint32_t)&_thread_mode_bounce;
            frame->psr = (1 << 24); /* thread bit set, IPSR 0 */
            /* the remaining registers are dont-cares; 99 is an arbitrary marker */
            frame->r0 = frame->r1 = frame->r2 = frame->r3 = frame->r12 = frame->lr = 99;

            LTRACEF("iretting to user space\n");
            //hexdump(frame, sizeof(*frame) + 64);

            __asm__ volatile(
#if (__CORTEX_M >= 0x03)
                "clrex;"
#endif
                "mov sp, %0;"
                /* 0xfffffff9 = EXC_RETURN: thread mode, main stack */
                "bx %1;"
                :: "r"(frame), "r"(0xfffffff9)
            );
            __UNREACHABLE;
        }
    } else {
        oldthread->arch.was_preempted = false;

        if (newthread->arch.was_preempted) {
            LTRACEF("not being preempted, but switching to preempted thread\n");
            _half_save_and_svc(&oldthread->arch.sp, newthread->arch.sp);
        } else {
            /* fast path, both sides did not preempt */
            _arch_non_preempt_context_switch(&oldthread->arch.sp, newthread->arch.sp);
        }
    }

}

342void arch_dump_thread(thread_t *t)
343{
344 if (t->state != THREAD_RUNNING) {
345 dprintf(INFO, "\tarch: ");
346 dprintf(INFO, "sp 0x%lx, was preempted %u\n", t->arch.sp, t->arch.was_preempted);
347 }
348}
349
350