/*
 * Copyright (c) 2008-2015 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 * @brief Kernel threading
 *
 * This file is the core kernel threading interface.
 *
 * @defgroup thread Threads
 * @{
 */
#include <debug.h>
#include <assert.h>
#include <list.h>
#include <malloc.h>
#include <string.h>
#include <printf.h>
#include <err.h>
#include <lib/dpc.h>
#include <kernel/thread.h>
#include <kernel/timer.h>
#include <kernel/debug.h>
#include <kernel/mp.h>
#include <platform.h>
#include <target.h>
#include <lib/heap.h>

#if THREAD_STATS
struct thread_stats thread_stats[SMP_MAX_CPUS];
#endif

#define STACK_DEBUG_BYTE (0x99)
#define STACK_DEBUG_WORD (0x99999999)

#define DEBUG_THREAD_CONTEXT_SWITCH 0

/* global thread list */
static struct list_node thread_list;

/* master thread spinlock */
spin_lock_t thread_lock = SPIN_LOCK_INITIAL_VALUE;

/* the run queue */
static struct list_node run_queue[NUM_PRIORITIES];
static uint32_t run_queue_bitmap;

/* make sure the bitmap is large enough to cover our number of priorities */
STATIC_ASSERT(NUM_PRIORITIES <= sizeof(run_queue_bitmap) * 8);

/* the idle thread(s) (statically allocated) */
#if WITH_SMP
static thread_t _idle_threads[SMP_MAX_CPUS];
#define idle_thread(cpu) (&_idle_threads[cpu])
#else
static thread_t _idle_thread;
#define idle_thread(cpu) (&_idle_thread)
#endif

/* local routines */
static void thread_resched(void);
static void idle_thread_routine(void) __NO_RETURN;

#if PLATFORM_HAS_DYNAMIC_TIMER
/* preemption timer */
static timer_t preempt_timer[SMP_MAX_CPUS];
#endif

/* run queue manipulation */
static void insert_in_run_queue_head(thread_t *t)
{
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_READY);
    DEBUG_ASSERT(!list_in_list(&t->queue_node));
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    list_add_head(&run_queue[t->priority], &t->queue_node);
    run_queue_bitmap |= (1<<t->priority);
}

static void insert_in_run_queue_tail(thread_t *t)
{
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_READY);
    DEBUG_ASSERT(!list_in_list(&t->queue_node));
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    list_add_tail(&run_queue[t->priority], &t->queue_node);
    run_queue_bitmap |= (1<<t->priority);
}

static void init_thread_struct(thread_t *t, const char *name)
{
    memset(t, 0, sizeof(thread_t));
    t->magic = THREAD_MAGIC;
    thread_set_pinned_cpu(t, -1);
    strlcpy(t->name, name, sizeof(t->name));
}

/**
 * @brief Create a new thread
 *
 * This function creates a new thread. The thread is initially suspended, so you
 * need to call thread_resume() to execute it.
 *
 * @param name        Name of thread
 * @param entry       Entry point of thread
 * @param arg         Arbitrary argument passed to entry()
 * @param priority    Execution priority for the thread
 * @param stack_size  Stack size for the thread
 *
 * Thread priority is an integer from 0 (lowest) to 31 (highest). Some standard
 * priorities are defined in <kernel/thread.h>:
 *
 *  HIGHEST_PRIORITY
 *  DPC_PRIORITY
 *  HIGH_PRIORITY
 *  DEFAULT_PRIORITY
 *  LOW_PRIORITY
 *  IDLE_PRIORITY
 *  LOWEST_PRIORITY
 *
 * Stack size is typically set to DEFAULT_STACK_SIZE.
 *
 * @return Pointer to thread object, or NULL on failure.
 */
thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine entry, void *arg, int priority, void *stack, size_t stack_size)
{
    unsigned int flags = 0;

    if (!t) {
        t = malloc(sizeof(thread_t));
        if (!t)
            return NULL;
        flags |= THREAD_FLAG_FREE_STRUCT;
    }

    init_thread_struct(t, name);

    t->entry = entry;
    t->arg = arg;
    t->priority = priority;
    t->state = THREAD_SUSPENDED;
    t->blocking_wait_queue = NULL;
    t->wait_queue_block_ret = NO_ERROR;
    thread_set_curr_cpu(t, -1);

    t->retcode = 0;
    wait_queue_init(&t->retcode_wait_queue);

    /* create the stack */
    if (!stack) {
#if THREAD_STACK_BOUNDS_CHECK
        stack_size += THREAD_STACK_PADDING_SIZE;
        flags |= THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK;
#endif
        t->stack = malloc(stack_size);
        if (!t->stack) {
            if (flags & THREAD_FLAG_FREE_STRUCT)
                free(t);
            return NULL;
        }
        flags |= THREAD_FLAG_FREE_STACK;
#if THREAD_STACK_BOUNDS_CHECK
        memset(t->stack, STACK_DEBUG_BYTE, THREAD_STACK_PADDING_SIZE);
#endif
    } else {
        t->stack = stack;
    }

    t->stack_size = stack_size;

    /* save whether or not we need to free the thread struct and/or stack */
    t->flags = flags;

    /* inherit thread local storage from the parent */
    thread_t *current_thread = get_current_thread();
    int i;
    for (i=0; i < MAX_TLS_ENTRY; i++)
        t->tls[i] = current_thread->tls[i];

    /* set up the initial stack frame */
    arch_thread_initialize(t);

    /* add it to the global thread list */
    THREAD_LOCK(state);
    list_add_head(&thread_list, &t->thread_list_node);
    THREAD_UNLOCK(state);

    return t;
}

thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
{
    return thread_create_etc(NULL, name, entry, arg, priority, NULL, stack_size);
}
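
/*
 * Example of the full thread lifecycle (a minimal sketch; the "worker"
 * entry routine and its return code are hypothetical):
 *
 *     static int worker(void *arg)
 *     {
 *         // do some work, then return a code for thread_join() to collect
 *         return 42;
 *     }
 *
 *     thread_t *t = thread_create("worker", &worker, NULL,
 *                                 DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *     if (t) {
 *         thread_resume(t);   // threads are created suspended
 *
 *         int retcode;
 *         thread_join(t, &retcode, INFINITE_TIME);   // also reaps the thread
 *     }
 */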

/**
 * @brief Flag a thread as real time
 *
 * @param t Thread to flag
 *
 * @return NO_ERROR on success
 */
status_t thread_set_real_time(thread_t *t)
{
    if (!t)
        return ERR_INVALID_ARGS;

    DEBUG_ASSERT(t->magic == THREAD_MAGIC);

    THREAD_LOCK(state);
#if PLATFORM_HAS_DYNAMIC_TIMER
    if (t == get_current_thread()) {
        /* if we're currently running, cancel the preemption timer. */
        timer_cancel(&preempt_timer[arch_curr_cpu_num()]);
    }
#endif
    t->flags |= THREAD_FLAG_REAL_TIME;
    THREAD_UNLOCK(state);

    return NO_ERROR;
}
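
/*
 * Example (a sketch; the "audio" thread and its entry routine are
 * hypothetical). Real-time threads are exempted from the preemption tick,
 * so they are expected to block or yield on their own:
 *
 *     thread_t *t = thread_create("audio", &audio_entry, NULL,
 *                                 HIGH_PRIORITY, DEFAULT_STACK_SIZE);
 *     if (t) {
 *         thread_set_real_time(t);
 *         thread_detach_and_resume(t);
 *     }
 */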

static bool thread_is_realtime(thread_t *t)
{
    return (t->flags & THREAD_FLAG_REAL_TIME) && t->priority > DEFAULT_PRIORITY;
}

static bool thread_is_idle(thread_t *t)
{
    return !!(t->flags & THREAD_FLAG_IDLE);
}

static bool thread_is_real_time_or_idle(thread_t *t)
{
    return !!(t->flags & (THREAD_FLAG_REAL_TIME | THREAD_FLAG_IDLE));
}

/**
 * @brief Make a suspended thread executable.
 *
 * This function is typically called to start a thread which has just been
 * created with thread_create().
 *
 * @param t Thread to resume
 *
 * @return NO_ERROR on success. If the thread was not suspended, the call is a no-op.
 */
status_t thread_resume(thread_t *t)
{
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state != THREAD_DEATH);

    bool resched = false;
    bool ints_disabled = arch_ints_disabled();
    THREAD_LOCK(state);
    if (t->state == THREAD_SUSPENDED) {
        t->state = THREAD_READY;
        insert_in_run_queue_head(t);
        if (!ints_disabled) /* HACK, don't resched into the bootstrap thread before the idle thread is set up */
            resched = true;
    }

    mp_reschedule(MP_CPU_ALL_BUT_LOCAL, 0);

    THREAD_UNLOCK(state);

    if (resched)
        thread_yield();

    return NO_ERROR;
}

status_t thread_detach_and_resume(thread_t *t)
{
    status_t err;
    err = thread_detach(t);
    if (err < 0)
        return err;
    return thread_resume(t);
}

status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout)
{
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);

    THREAD_LOCK(state);

    if (t->flags & THREAD_FLAG_DETACHED) {
        /* the thread is detached, go ahead and exit */
        THREAD_UNLOCK(state);
        return ERR_THREAD_DETACHED;
    }

    /* wait for the thread to die */
    if (t->state != THREAD_DEATH) {
        status_t err = wait_queue_block(&t->retcode_wait_queue, timeout);
        if (err < 0) {
            THREAD_UNLOCK(state);
            return err;
        }
    }

    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_DEATH);
    DEBUG_ASSERT(t->blocking_wait_queue == NULL);
    DEBUG_ASSERT(!list_in_list(&t->queue_node));

    /* save the return code */
    if (retcode)
        *retcode = t->retcode;

    /* remove it from the master thread list */
    list_delete(&t->thread_list_node);

    /* clear the structure's magic */
    t->magic = 0;

    THREAD_UNLOCK(state);

    /* free its stack and the thread structure itself */
    if (t->flags & THREAD_FLAG_FREE_STACK && t->stack)
        free(t->stack);

    if (t->flags & THREAD_FLAG_FREE_STRUCT)
        free(t);

    return NO_ERROR;
}

status_t thread_detach(thread_t *t)
{
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);

    THREAD_LOCK(state);

    /* if another thread is blocked inside thread_join() on this thread,
     * wake it up with a specific return code */
    wait_queue_wake_all(&t->retcode_wait_queue, false, ERR_THREAD_DETACHED);

    /* if it's already dead, then just do what join would have and exit */
    if (t->state == THREAD_DEATH) {
        t->flags &= ~THREAD_FLAG_DETACHED; /* makes sure thread_join continues */
        THREAD_UNLOCK(state);
        return thread_join(t, NULL, 0);
    } else {
        t->flags |= THREAD_FLAG_DETACHED;
        THREAD_UNLOCK(state);
        return NO_ERROR;
    }
}
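
/*
 * Example of a fire-and-forget thread (a sketch; the "logger" thread and
 * its entry routine are hypothetical). A detached thread cleans up after
 * itself in thread_exit(), so it must never be joined:
 *
 *     thread_t *t = thread_create("logger", &logger_entry, NULL,
 *                                 LOW_PRIORITY, DEFAULT_STACK_SIZE);
 *     if (t)
 *         thread_detach_and_resume(t);   // detach first, then start it
 */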

/**
 * @brief Terminate the current thread
 *
 * The current thread exits with the specified return code.
 *
 * This function does not return.
 */
void thread_exit(int retcode)
{
    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
    DEBUG_ASSERT(!thread_is_idle(current_thread));

//  dprintf("thread_exit: current %p\n", current_thread);

    THREAD_LOCK(state);

    /* enter the dead state */
    current_thread->state = THREAD_DEATH;
    current_thread->retcode = retcode;

    /* if we're detached, then do our teardown here */
    if (current_thread->flags & THREAD_FLAG_DETACHED) {
        /* remove it from the master thread list */
        list_delete(&current_thread->thread_list_node);

        /* clear the structure's magic */
        current_thread->magic = 0;

        /* free its stack and the thread structure itself */
        if (current_thread->flags & THREAD_FLAG_FREE_STACK && current_thread->stack) {
            heap_delayed_free(current_thread->stack);

            /* make sure it's not going to get a bounds check performed on the half-freed stack */
            current_thread->flags &= ~THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK;
        }

        if (current_thread->flags & THREAD_FLAG_FREE_STRUCT)
            heap_delayed_free(current_thread);
    } else {
        /* signal if anyone is waiting */
        wait_queue_wake_all(&current_thread->retcode_wait_queue, false, 0);
    }

    /* reschedule */
    thread_resched();

    panic("somehow fell through thread_exit()\n");
}

static void idle_thread_routine(void)
{
    for (;;)
        arch_idle();
}

static thread_t *get_top_thread(int cpu)
{
    thread_t *newthread;
    uint32_t local_run_queue_bitmap = run_queue_bitmap;

    while (local_run_queue_bitmap) {
        /* find the first (remaining) queue with a thread in it */
        uint next_queue = HIGHEST_PRIORITY - __builtin_clz(local_run_queue_bitmap)
                          - (sizeof(run_queue_bitmap) * 8 - NUM_PRIORITIES);

        list_for_every_entry(&run_queue[next_queue], newthread, thread_t, queue_node) {
#if WITH_SMP
            if (newthread->pinned_cpu < 0 || newthread->pinned_cpu == cpu)
#endif
            {
                list_delete(&newthread->queue_node);

                if (list_is_empty(&run_queue[next_queue]))
                    run_queue_bitmap &= ~(1<<next_queue);

                return newthread;
            }
        }

        local_run_queue_bitmap &= ~(1<<next_queue);
    }

    /* no threads to run, select the idle thread for this cpu */
    return idle_thread(cpu);
}
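
/*
 * Worked example of the priority selection above, assuming the usual
 * configuration of NUM_PRIORITIES == 32 (which makes the subtracted
 * correction term zero): if run queues 4 and 9 are nonempty,
 * run_queue_bitmap is 0x00000210 and __builtin_clz() returns 22, so
 * next_queue computes to 31 - 22 = 9, the highest-priority nonempty queue.
 */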

/**
 * @brief Cause another thread to be executed.
 *
 * Internal reschedule routine. The current thread needs to already be in whatever
 * state and queues it needs to be in. This routine simply picks the next thread and
 * switches to it.
 *
 * This is probably not the function you're looking for; see
 * thread_yield() instead.
 */
void thread_resched(void)
{
    thread_t *oldthread;
    thread_t *newthread;

    thread_t *current_thread = get_current_thread();
    uint cpu = arch_curr_cpu_num();

    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));
    DEBUG_ASSERT(current_thread->state != THREAD_RUNNING);

    THREAD_STATS_INC(reschedules);

    newthread = get_top_thread(cpu);

    DEBUG_ASSERT(newthread);

    newthread->state = THREAD_RUNNING;

    oldthread = current_thread;

    if (newthread == oldthread)
        return;

    /* set up quantum for the new thread if it was consumed */
    if (newthread->remaining_quantum <= 0) {
        newthread->remaining_quantum = 5; // XXX make this smarter
    }

    /* mark the cpu ownership of the threads */
    thread_set_curr_cpu(oldthread, -1);
    thread_set_curr_cpu(newthread, cpu);

#if WITH_SMP
    if (thread_is_idle(newthread)) {
        mp_set_cpu_idle(cpu);
    } else {
        mp_set_cpu_busy(cpu);
    }

    if (thread_is_realtime(newthread)) {
        mp_set_cpu_realtime(cpu);
    } else {
        mp_set_cpu_non_realtime(cpu);
    }
#endif

#if THREAD_STATS
    THREAD_STATS_INC(context_switches);

    if (thread_is_idle(oldthread)) {
        lk_bigtime_t now = current_time_hires();
        thread_stats[cpu].idle_time += now - thread_stats[cpu].last_idle_timestamp;
    }
    if (thread_is_idle(newthread)) {
        thread_stats[cpu].last_idle_timestamp = current_time_hires();
    }
#endif

    KEVLOG_THREAD_SWITCH(oldthread, newthread);

#if PLATFORM_HAS_DYNAMIC_TIMER
    if (thread_is_real_time_or_idle(newthread)) {
        if (!thread_is_real_time_or_idle(oldthread)) {
            /* if we're switching from a non real time to a real time thread, cancel
             * the preemption timer. */
#if DEBUG_THREAD_CONTEXT_SWITCH
            dprintf(ALWAYS, "arch_context_switch: stop preempt, cpu %d, old %p (%s), new %p (%s)\n",
                    cpu, oldthread, oldthread->name, newthread, newthread->name);
#endif
            timer_cancel(&preempt_timer[cpu]);
        }
    } else if (thread_is_real_time_or_idle(oldthread)) {
        /* if we're switching from a real time (or idle) thread to a regular one,
         * set up a periodic timer to run our preemption tick. */
#if DEBUG_THREAD_CONTEXT_SWITCH
        dprintf(ALWAYS, "arch_context_switch: start preempt, cpu %d, old %p (%s), new %p (%s)\n",
                cpu, oldthread, oldthread->name, newthread, newthread->name);
#endif
        timer_set_periodic(&preempt_timer[cpu], 10, thread_timer_tick, NULL);
    }
#endif

    /* set some optional target debug leds */
    target_set_debug_led(0, !thread_is_idle(newthread));

    /* do the switch */
    set_current_thread(newthread);

#if DEBUG_THREAD_CONTEXT_SWITCH
    dprintf(ALWAYS, "arch_context_switch: cpu %d, old %p (%s, pri %d, flags 0x%x), new %p (%s, pri %d, flags 0x%x)\n",
            cpu, oldthread, oldthread->name, oldthread->priority,
            oldthread->flags, newthread, newthread->name,
            newthread->priority, newthread->flags);
#endif

#if THREAD_STACK_BOUNDS_CHECK
    /* check that the old thread has not blown its stack just before pushing its context */
    if (oldthread->flags & THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK) {
        STATIC_ASSERT((THREAD_STACK_PADDING_SIZE % sizeof(uint32_t)) == 0);
        uint32_t *s = (uint32_t *)oldthread->stack;
        for (size_t i = 0; i < THREAD_STACK_PADDING_SIZE / sizeof(uint32_t); i++) {
            if (unlikely(s[i] != STACK_DEBUG_WORD)) {
                /* NOTE: will probably blow the stack harder here, but hopefully enough
                 * state exists to at least get some sort of debugging done.
                 */
                panic("stack overrun at %p: thread %p (%s), stack %p\n", &s[i],
                      oldthread, oldthread->name, oldthread->stack);
            }
        }
    }
#endif

#ifdef WITH_LIB_UTHREAD
    uthread_context_switch(oldthread, newthread);
#endif
    arch_context_switch(oldthread, newthread);
}

/**
 * @brief Yield the cpu to another thread
 *
 * This function places the current thread at the end of the run queue
 * and yields the cpu to another waiting thread (if any).
 *
 * This function will return at some later time, possibly immediately if
 * no other threads are waiting to execute.
 */
void thread_yield(void)
{
    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);

    THREAD_LOCK(state);

    THREAD_STATS_INC(yields);

    /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
    current_thread->state = THREAD_READY;
    current_thread->remaining_quantum = 0;
    if (likely(!thread_is_idle(current_thread))) { /* the idle thread doesn't go in the run queue */
        insert_in_run_queue_tail(current_thread);
    }
    thread_resched();

    THREAD_UNLOCK(state);
}
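
/*
 * Example: a cooperative busy-poll loop that gives other ready threads a
 * chance to run between checks (a sketch; device_ready() is hypothetical):
 *
 *     while (!device_ready())
 *         thread_yield();
 */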

/**
 * @brief Briefly yield the cpu to another thread
 *
 * This function is similar to thread_yield(), except that it will
 * restart more quickly.
 *
 * This function places the current thread at the head of the run
 * queue and then yields the cpu to another thread.
 *
 * Exception: if the time slice for this thread has expired, then
 * the thread goes to the end of the run queue.
 *
 * This function will return at some later time, possibly immediately if
 * no other threads are waiting to execute.
 */
void thread_preempt(void)
{
    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);

#if THREAD_STATS
    if (!thread_is_idle(current_thread))
        THREAD_STATS_INC(preempts); /* only track when a meaningful preempt happens */
#endif

    KEVLOG_THREAD_PREEMPT(current_thread);

    THREAD_LOCK(state);

    /* we are being preempted, so we get to go back to the front of the run queue if we have quantum left */
    current_thread->state = THREAD_READY;
    if (likely(!thread_is_idle(current_thread))) { /* the idle thread doesn't go in the run queue */
        if (current_thread->remaining_quantum > 0)
            insert_in_run_queue_head(current_thread);
        else
            insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
    }
    thread_resched();

    THREAD_UNLOCK(state);
}

/**
 * @brief Suspend the current thread until it is woken.
 *
 * This function schedules another thread to execute. It does not
 * return until the thread is made runnable again by some other module.
 *
 * You probably don't want to call this function directly; it's meant to be called
 * from other modules, such as mutex, which will presumably set the thread's
 * state to blocked and add it to some queue or another.
 */
void thread_block(void)
{
    __UNUSED thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_BLOCKED);
    DEBUG_ASSERT(spin_lock_held(&thread_lock));
    DEBUG_ASSERT(!thread_is_idle(current_thread));

    /* we are blocking on something. the blocking code should have already stuck us on a queue */
    thread_resched();
}

void thread_unblock(thread_t *t, bool resched)
{
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_BLOCKED);
    DEBUG_ASSERT(spin_lock_held(&thread_lock));
    DEBUG_ASSERT(!thread_is_idle(t));

    t->state = THREAD_READY;
    insert_in_run_queue_head(t);
    mp_reschedule(MP_CPU_ALL_BUT_LOCAL, 0);
    if (resched)
        thread_resched();
}

enum handler_return thread_timer_tick(struct timer *t, lk_time_t now, void *arg)
{
    thread_t *current_thread = get_current_thread();

    if (thread_is_real_time_or_idle(current_thread))
        return INT_NO_RESCHEDULE;

    current_thread->remaining_quantum--;
    if (current_thread->remaining_quantum <= 0) {
        return INT_RESCHEDULE;
    } else {
        return INT_NO_RESCHEDULE;
    }
}

/* timer callback to wake up a sleeping thread */
static enum handler_return thread_sleep_handler(timer_t *timer, lk_time_t now, void *arg)
{
    thread_t *t = (thread_t *)arg;

    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_SLEEPING);

    THREAD_LOCK(state);

    t->state = THREAD_READY;
    insert_in_run_queue_head(t);

    THREAD_UNLOCK(state);

    return INT_RESCHEDULE;
}

/**
 * @brief Put thread to sleep; delay specified in ms
 *
 * This function puts the current thread to sleep until the specified
 * delay in ms has expired.
 *
 * Note that this function could sleep for longer than the specified delay if
 * other threads are running. When the timer expires, this thread will
 * be placed at the head of the run queue.
 */
void thread_sleep(lk_time_t delay)
{
    timer_t timer;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
    DEBUG_ASSERT(!thread_is_idle(current_thread));

    timer_initialize(&timer);

    THREAD_LOCK(state);
    timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
    current_thread->state = THREAD_SLEEPING;
    thread_resched();
    THREAD_UNLOCK(state);
}
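
/*
 * Example: a simple periodic worker (a sketch; the 100 ms interval and
 * do_housekeeping() are hypothetical):
 *
 *     for (;;) {
 *         do_housekeeping();
 *         thread_sleep(100);   // wait roughly 100 ms between passes
 *     }
 */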

/**
 * @brief Initialize threading system
 *
 * This function is called once, from kmain().
 */
void thread_init_early(void)
{
    int i;

    DEBUG_ASSERT(arch_curr_cpu_num() == 0);

    /* initialize the run queues */
    for (i=0; i < NUM_PRIORITIES; i++)
        list_initialize(&run_queue[i]);

    /* initialize the thread list */
    list_initialize(&thread_list);

    /* create a thread to cover the current running state */
    thread_t *t = idle_thread(0);
    init_thread_struct(t, "bootstrap");

    /* half construct this thread, since we're already running */
    t->priority = HIGHEST_PRIORITY;
    t->state = THREAD_RUNNING;
    t->flags = THREAD_FLAG_DETACHED;
    thread_set_curr_cpu(t, 0);
    thread_set_pinned_cpu(t, 0);
    wait_queue_init(&t->retcode_wait_queue);
    list_add_head(&thread_list, &t->thread_list_node);
    set_current_thread(t);
}

/**
 * @brief Complete thread initialization
 *
 * This function is called once at boot time.
 */
void thread_init(void)
{
#if PLATFORM_HAS_DYNAMIC_TIMER
    for (uint i = 0; i < SMP_MAX_CPUS; i++) {
        timer_initialize(&preempt_timer[i]);
    }
#endif
}

/**
 * @brief Change name of current thread
 */
void thread_set_name(const char *name)
{
    thread_t *current_thread = get_current_thread();
    strlcpy(current_thread->name, name, sizeof(current_thread->name));
}

/**
 * @brief Change priority of current thread
 *
 * See thread_create() for a discussion of priority values.
 */
void thread_set_priority(int priority)
{
    thread_t *current_thread = get_current_thread();

    THREAD_LOCK(state);

    if (priority <= IDLE_PRIORITY)
        priority = IDLE_PRIORITY + 1;
    if (priority > HIGHEST_PRIORITY)
        priority = HIGHEST_PRIORITY;
    current_thread->priority = priority;

    current_thread->state = THREAD_READY;
    insert_in_run_queue_head(current_thread);
    thread_resched();

    THREAD_UNLOCK(state);
}

/**
 * @brief Become an idle thread
 *
 * This function marks the current thread as the idle thread -- the one which
 * executes when there is nothing else to do. This function does not return.
 * It is called once at boot time.
 */
void thread_become_idle(void)
{
    DEBUG_ASSERT(arch_ints_disabled());

    thread_t *t = get_current_thread();

#if WITH_SMP
    char name[16];
    snprintf(name, sizeof(name), "idle %d", arch_curr_cpu_num());
    thread_set_name(name);
#else
    thread_set_name("idle");
#endif

    /* mark ourselves as idle */
    t->priority = IDLE_PRIORITY;
    t->flags |= THREAD_FLAG_IDLE;
    thread_set_pinned_cpu(t, arch_curr_cpu_num());

    mp_set_curr_cpu_active(true);
    mp_set_cpu_idle(arch_curr_cpu_num());

    /* enable interrupts and start the scheduler */
    arch_enable_ints();
    thread_yield();

    idle_thread_routine();
}

/* create an idle thread for the cpu we're on, and start scheduling */
void thread_secondary_cpu_init_early(void)
{
    DEBUG_ASSERT(arch_ints_disabled());

    /* construct an idle thread to cover our cpu */
    uint cpu = arch_curr_cpu_num();
    thread_t *t = idle_thread(cpu);

    char name[16];
    snprintf(name, sizeof(name), "idle %u", cpu);
    init_thread_struct(t, name);
    thread_set_pinned_cpu(t, cpu);

    /* half construct this thread, since we're already running */
    t->priority = HIGHEST_PRIORITY;
    t->state = THREAD_RUNNING;
    t->flags = THREAD_FLAG_DETACHED | THREAD_FLAG_IDLE;
    thread_set_curr_cpu(t, cpu);
    thread_set_pinned_cpu(t, cpu);
    wait_queue_init(&t->retcode_wait_queue);

    THREAD_LOCK(state);

    list_add_head(&thread_list, &t->thread_list_node);
    set_current_thread(t);

    THREAD_UNLOCK(state);
}

void thread_secondary_cpu_entry(void)
{
    uint cpu = arch_curr_cpu_num();
    thread_t *t = get_current_thread();
    t->priority = IDLE_PRIORITY;

    mp_set_curr_cpu_active(true);
    mp_set_cpu_idle(cpu);

    /* enable interrupts and start the scheduler on this cpu */
    arch_enable_ints();
    thread_yield();

    idle_thread_routine();
}

static const char *thread_state_to_str(enum thread_state state)
{
    switch (state) {
        case THREAD_SUSPENDED: return "susp";
        case THREAD_READY: return "rdy";
        case THREAD_RUNNING: return "run";
        case THREAD_BLOCKED: return "blok";
        case THREAD_SLEEPING: return "slep";
        case THREAD_DEATH: return "deth";
        default: return "unkn";
    }
}

/**
 * @brief Dump debugging info about the specified thread.
 */
void dump_thread(thread_t *t)
{
    dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
#if WITH_SMP
    dprintf(INFO, "\tstate %s, curr_cpu %d, pinned_cpu %d, priority %d, remaining quantum %d\n",
            thread_state_to_str(t->state), t->curr_cpu, t->pinned_cpu, t->priority, t->remaining_quantum);
#else
    dprintf(INFO, "\tstate %s, priority %d, remaining quantum %d\n",
            thread_state_to_str(t->state), t->priority, t->remaining_quantum);
#endif
    dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
    dprintf(INFO, "\tentry %p, arg %p, flags 0x%x\n", t->entry, t->arg, t->flags);
    dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
#if (MAX_TLS_ENTRY > 0)
    dprintf(INFO, "\ttls:");
    int i;
    for (i=0; i < MAX_TLS_ENTRY; i++) {
        dprintf(INFO, " 0x%lx", t->tls[i]);
    }
    dprintf(INFO, "\n");
#endif
    arch_dump_thread(t);
}

/**
 * @brief Dump debugging info about all threads
 */
void dump_all_threads(void)
{
    thread_t *t;

    THREAD_LOCK(state);
    list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
        if (t->magic != THREAD_MAGIC) {
            dprintf(INFO, "bad magic on thread struct %p, aborting.\n", t);
            hexdump(t, sizeof(thread_t));
            break;
        }
        dump_thread(t);
    }
    THREAD_UNLOCK(state);
}

/** @} */


/**
 * @defgroup wait Wait Queue
 * @{
 */
void wait_queue_init(wait_queue_t *wait)
{
    *wait = (wait_queue_t)WAIT_QUEUE_INITIAL_VALUE(*wait);
}

static enum handler_return wait_queue_timeout_handler(timer_t *timer, lk_time_t now, void *arg)
{
    thread_t *thread = (thread_t *)arg;

    DEBUG_ASSERT(thread->magic == THREAD_MAGIC);

    spin_lock(&thread_lock);

    enum handler_return ret = INT_NO_RESCHEDULE;
    if (thread_unblock_from_wait_queue(thread, ERR_TIMED_OUT) >= NO_ERROR) {
        ret = INT_RESCHEDULE;
    }

    spin_unlock(&thread_lock);

    return ret;
}

/**
 * @brief Block until a wait queue is notified.
 *
 * This function puts the current thread at the end of a wait
 * queue and then blocks until some other thread wakes the queue
 * up again.
 *
 * @param wait     The wait queue to enter
 * @param timeout  The maximum time, in ms, to wait
 *
 * If the timeout is zero, this function returns immediately with
 * ERR_TIMED_OUT. If the timeout is INFINITE_TIME, this function
 * waits indefinitely. Otherwise, this function returns with
 * ERR_TIMED_OUT at the end of the timeout period.
 *
 * @return ERR_TIMED_OUT on timeout, else returns the return
 * value specified when the queue was woken by wait_queue_wake_one().
 */
status_t wait_queue_block(wait_queue_t *wait, lk_time_t timeout)
{
    timer_t timer;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    if (timeout == 0)
        return ERR_TIMED_OUT;

    list_add_tail(&wait->list, &current_thread->queue_node);
    wait->count++;
    current_thread->state = THREAD_BLOCKED;
    current_thread->blocking_wait_queue = wait;
    current_thread->wait_queue_block_ret = NO_ERROR;

    /* if the timeout is finite, set a callback to yank us out of the queue */
    if (timeout != INFINITE_TIME) {
        timer_initialize(&timer);
        timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
    }

    thread_resched();

    /* we don't really know if the timer fired or not, so it's safest to try to cancel it */
    if (timeout != INFINITE_TIME) {
        timer_cancel(&timer);
    }

    return current_thread->wait_queue_block_ret;
}
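
/*
 * Example of a blocking wait with a timeout (a sketch; my_queue is a
 * hypothetical wait_queue_t initialized elsewhere with wait_queue_init()).
 * Note that the thread lock must be held across the call:
 *
 *     THREAD_LOCK(state);
 *     status_t err = wait_queue_block(&my_queue, 1000);   // up to 1000 ms
 *     THREAD_UNLOCK(state);
 *     if (err == ERR_TIMED_OUT) {
 *         // nobody woke us within the timeout
 *     }
 */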

/**
 * @brief Wake up one thread sleeping on a wait queue
 *
 * This function removes one thread (if any) from the head of the wait queue and
 * makes it executable. The new thread will be placed at the head of the
 * run queue.
 *
 * @param wait  The wait queue to wake
 * @param reschedule  If true, the newly-woken thread will run immediately.
 * @param wait_queue_error  The return value which the new thread will receive
 * from wait_queue_block().
 *
 * @return The number of threads woken (zero or one)
 */
int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
    thread_t *t;
    int ret = 0;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    t = list_remove_head_type(&wait->list, thread_t, queue_node);
    if (t) {
        wait->count--;
        DEBUG_ASSERT(t->state == THREAD_BLOCKED);
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;

        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened thread gets a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        if (reschedule) {
            current_thread->state = THREAD_READY;
            insert_in_run_queue_head(current_thread);
        }
        insert_in_run_queue_head(t);
        mp_reschedule(MP_CPU_ALL_BUT_LOCAL, 0);
        if (reschedule) {
            thread_resched();
        }
        ret = 1;
    }

    return ret;
}
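
/*
 * Example of the wake side of the sketch above (my_queue is again
 * hypothetical); passing NO_ERROR hands a success status to the woken
 * waiter's wait_queue_block() call:
 *
 *     THREAD_LOCK(state);
 *     wait_queue_wake_one(&my_queue, false, NO_ERROR);
 *     THREAD_UNLOCK(state);
 */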

/**
 * @brief Wake all threads sleeping on a wait queue
 *
 * This function removes all threads (if any) from the wait queue and
 * makes them executable. The new threads will be placed at the head of the
 * run queue.
 *
 * @param wait  The wait queue to wake
 * @param reschedule  If true, the newly-woken threads will run immediately.
 * @param wait_queue_error  The return value which the new threads will receive
 * from wait_queue_block().
 *
 * @return The number of threads woken
 */
int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
    thread_t *t;
    int ret = 0;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    if (reschedule && wait->count > 0) {
        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened threads get a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        current_thread->state = THREAD_READY;
        insert_in_run_queue_head(current_thread);
    }

    /* pop all the threads off the wait queue into the run queue */
    while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
        wait->count--;
        DEBUG_ASSERT(t->state == THREAD_BLOCKED);
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;

        insert_in_run_queue_head(t);
        ret++;
    }

    DEBUG_ASSERT(wait->count == 0);

    if (ret > 0) {
        mp_reschedule(MP_CPU_ALL_BUT_LOCAL, 0);
        if (reschedule) {
            thread_resched();
        }
    }

    return ret;
}

/**
 * @brief Free all resources allocated in wait_queue_init()
 *
 * If any threads were waiting on this queue, they are all woken.
 */
void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
{
    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
    wait->magic = 0;
}

/**
 * @brief Wake a specific thread in a wait queue
 *
 * This function extracts a specific thread from a wait queue, wakes it, and
 * puts it at the head of the run queue.
 *
 * @param t  The thread to wake
 * @param wait_queue_error  The return value which the woken thread will receive
 * from wait_queue_block().
 *
 * @return ERR_NOT_BLOCKED if the thread was not in any wait queue.
 */
status_t thread_unblock_from_wait_queue(thread_t *t, status_t wait_queue_error)
{
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    if (t->state != THREAD_BLOCKED)
        return ERR_NOT_BLOCKED;

    DEBUG_ASSERT(t->blocking_wait_queue != NULL);
    DEBUG_ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(list_in_list(&t->queue_node));

    list_delete(&t->queue_node);
    t->blocking_wait_queue->count--;
    t->blocking_wait_queue = NULL;
    t->state = THREAD_READY;
    t->wait_queue_block_ret = wait_queue_error;
    insert_in_run_queue_head(t);
    mp_reschedule(MP_CPU_ALL_BUT_LOCAL, 0);

    return NO_ERROR;
}

#if defined(WITH_DEBUGGER_INFO)
// This is, by necessity, arch-specific, and arm-m specific right now,
// but lives here due to thread_list being static.
//
// It contains sufficient information for a remote debugger to walk
// the thread list without needing the symbols and debug sections in
// the elf binary for lk or the ability to parse them.
const struct __debugger_info__ {
    u32 version; // flags:16 major:8 minor:8
    void *thread_list_ptr;
    void *current_thread_ptr;
    u8 off_list_node;
    u8 off_state;
    u8 off_saved_sp;
    u8 off_was_preempted;
    u8 off_name;
    u8 off_waitq;
} _debugger_info = {
    .version = 0x0100,
    .thread_list_ptr = &thread_list,
    .current_thread_ptr = &_current_thread,
    .off_list_node = __builtin_offsetof(thread_t, thread_list_node),
    .off_state = __builtin_offsetof(thread_t, state),
    .off_saved_sp = __builtin_offsetof(thread_t, arch.sp),
    .off_was_preempted = __builtin_offsetof(thread_t, arch.was_preempted),
    .off_name = __builtin_offsetof(thread_t, name),
    .off_waitq = __builtin_offsetof(thread_t, blocking_wait_queue),
};
#endif

/* vim: set ts=4 sw=4 noexpandtab: */