1 /*
2  * Copyright (c) 2008-2015 Travis Geiselbrecht
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 /**
25  * @file
26  * @brief  Kernel threading
27  *
28  * This file is the core kernel threading interface.
29  *
30  * @defgroup thread Threads
31  * @{
32  */
33 #include <debug.h>
34 #include <assert.h>
35 #include <list.h>
36 #include <malloc.h>
37 #include <string.h>
38 #include <printf.h>
39 #include <err.h>
40 #include <lib/dpc.h>
41 #include <kernel/thread.h>
42 #include <kernel/timer.h>
43 #include <kernel/debug.h>
44 #include <kernel/mp.h>
45 #include <platform.h>
46 #include <target.h>
47 #include <lib/heap.h>
48 #include <lib/rand/rand.h>
49 #include <inttypes.h>
50 #if WITH_KERNEL_VM
51 #include <kernel/vm.h>
52 #endif
53 #if LK_LIBC_IMPLEMENTATION_IS_MUSL
54 #include <trusty/libc_state.h>
55 #endif
56 
57 #if THREAD_STATS
58 struct thread_stats thread_stats[SMP_MAX_CPUS];
59 #endif
60 
61 #define STACK_DEBUG_BYTE (0x99)
62 #define STACK_DEBUG_WORD (0x99999999)
63 
64 #define DEBUG_THREAD_CONTEXT_SWITCH (0)
65 #define DEBUG_THREAD_CPU_WAKE (0)
66 #define DEBUG_THREAD_CPU_PIN (0)
67 
68 /* global thread list */
69 static struct list_node thread_list;
70 
71 /* master thread spinlock */
72 spin_lock_t thread_lock = SPIN_LOCK_INITIAL_VALUE;
73 
74 atomic_uint thread_lock_owner = SMP_MAX_CPUS;
75 
76 /* the run queue */
77 static struct list_node run_queue[NUM_PRIORITIES];
78 static uint32_t run_queue_bitmap;
79 
80 /* make sure the bitmap is large enough to cover our number of priorities */
81 STATIC_ASSERT(NUM_PRIORITIES <= sizeof(run_queue_bitmap) * 8);
82 
83 /* Priority of current thread running on cpu, or last signalled */
84 static int cpu_priority[SMP_MAX_CPUS];
85 
86 /* the idle thread(s) (statically allocated) */
87 #if WITH_SMP
88 static thread_t _idle_threads[SMP_MAX_CPUS];
89 #define idle_thread(cpu) (&_idle_threads[cpu])
90 #else
91 static thread_t _idle_thread;
92 #define idle_thread(cpu) (&_idle_thread)
93 #endif
94 
95 /* list of dead detached threads and wait queue to signal reaper */
96 static struct list_node dead_threads;
97 static struct wait_queue reaper_wait_queue;
98 
99 /* local routines */
100 static const char *thread_state_to_str(enum thread_state state);
101 static void thread_resched(void);
102 static void idle_thread_routine(void) __NO_RETURN;
103 static enum handler_return thread_timer_callback(struct timer *t,
104                                                  lk_time_ns_t now, void *arg);
105 
106 /* preemption timer */
107 static timer_t preempt_timer[SMP_MAX_CPUS];
108 
109 #define US2NS(us) ((us) * 1000ULL)
110 #define MS2NS(ms) (US2NS(ms) * 1000ULL)
111 
112 __WEAK void platform_cpu_priority_set(uint32_t cpu, uint32_t priority) {
113     (void)cpu;
114     (void)priority;
115 }
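
/*
 * Illustrative sketch: a platform may provide a strong definition of this
 * __WEAK hook to forward the priority hint to hardware (for example a power
 * or DVFS controller). The register macro below is hypothetical.
 *
 *   void platform_cpu_priority_set(uint32_t cpu, uint32_t priority)
 *   {
 *       writel(priority, PLAT_PM_CPU_PRIO_HINT(cpu));
 *   }
 */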
116 
117 /* run queue manipulation */
118 static void insert_in_run_queue_head(thread_t *t)
119 {
120     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
121     DEBUG_ASSERT(t->state == THREAD_READY);
122     DEBUG_ASSERT(!list_in_list(&t->queue_node));
123     DEBUG_ASSERT(arch_ints_disabled());
124     DEBUG_ASSERT(thread_lock_held());
125 
126     list_add_head(&run_queue[t->priority], &t->queue_node);
127     run_queue_bitmap |= (1U<<t->priority);
128 }
129 
130 static void insert_in_run_queue_tail(thread_t *t)
131 {
132     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
133     DEBUG_ASSERT(t->state == THREAD_READY);
134     DEBUG_ASSERT(!list_in_list(&t->queue_node));
135     DEBUG_ASSERT(arch_ints_disabled());
136     DEBUG_ASSERT(thread_lock_held());
137 
138     list_add_tail(&run_queue[t->priority], &t->queue_node);
139     run_queue_bitmap |= (1U<<t->priority);
140 }
141 
142 static void delete_from_run_queue(thread_t *t) {
143     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
144     DEBUG_ASSERT(t->state == THREAD_READY);
145     DEBUG_ASSERT(list_in_list(&t->queue_node));
146     DEBUG_ASSERT(arch_ints_disabled());
147     DEBUG_ASSERT(thread_lock_held());
148 
149     list_delete(&t->queue_node);
150 
151     if (list_is_empty(&run_queue[t->priority]))
152         run_queue_bitmap &= ~(1U<<t->priority);
153 }
154 
155 /**
156  * thread_get_expected_cookie() - get expected cookie for a given thread
157  * @t: address of thread associated with the expected cookie
158  *
159  * Threads are expected to have the same cookie value modulo the effects of
160  * xor'ing the cookie with the address of the enclosing thread struct.
161  *
162  * Returns: expected cookie for thread t
163  */
164 static inline uint64_t thread_get_expected_cookie(const thread_t *t) {
165     /* undo xor with bootstrap thread address then xor with address of t */
166     return idle_thread(0)->cookie ^ (uint64_t)idle_thread(0) ^ (uint64_t)t;
167 }
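
/*
 * Illustrative derivation (comment only): let r be the random value drawn in
 * thread_init_cookie() for the bootstrap thread. Then
 *
 *   idle_thread(0)->cookie        == r ^ (uint64_t)idle_thread(0)
 *   thread_get_expected_cookie(t) == idle_thread(0)->cookie
 *                                    ^ (uint64_t)idle_thread(0) ^ (uint64_t)t
 *                                 == r ^ (uint64_t)t
 *
 * so every valid thread carries the same boot-time secret xor'ed with its own
 * address.
 */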
168 
169 static void init_thread_struct(thread_t *t, const char *name)
170 {
171     memset(t, 0, sizeof(thread_t));
172     t->magic = THREAD_MAGIC;
173     /*
174      * The bootstrap thread holds the expected cookie. If t is the bootstrap
175      * thread, the cookie has already been initialized so the following
176      * assignment is essentially a no-op.
177      */
178     t->cookie = thread_get_expected_cookie(t);
179     thread_set_pinned_cpu(t, -1);
180     strlcpy(t->name, name, sizeof(t->name));
181 }
182 
183 /**
184  * adjust_shadow_stack_base() - make shadow stack hit guard page if too small
185  * @base: pointer to the shadow stack allocation
186  * @size: size of the shadow stack. Can be less than memory allocated.
187  *
188  * Shadow stacks grow up and are followed by guard pages. Adjust the base
189  * so we'll hit the guard page if a thread needs more than the number of
190  * bytes requested. Call revert_shadow_stack_base to undo the adjustment.
191  *
192  * Return: pointer into the shadow stack allocation; differs from @base iff size % PAGE_SIZE != 0
193  */
194 static void* adjust_shadow_stack_base(uint8_t *base, size_t size) {
195    size_t adjustment = round_up(size, PAGE_SIZE) - size;
196    return base + adjustment;
197 }
198 
199 /**
200  * revert_shadow_stack_base() - inverse of adjust_shadow_stack_base
201  * @base: pointer returned by adjust_shadow_stack_base
202  * @size: size passed to adjust_shadow_stack_base
203  *
204  * Return: original pointer returned by vmm_alloc
205  */
206 static void* revert_shadow_stack_base(uint8_t *base, size_t size) {
207    size_t adjustment = round_up(size, PAGE_SIZE) - size;
208    return base - adjustment;
209 }
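
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for a requested
 * shadow stack of 1024 bytes backed by one page,
 *
 *   adjust_shadow_stack_base(base, 1024) == base + (4096 - 1024) == base + 3072
 *   revert_shadow_stack_base(base + 3072, 1024) == base
 *
 * so the usable 1024 bytes sit at the top of the page, directly below the
 * guard page, and the adjustment is exactly undone before the region is freed.
 */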
210 
211 /**
212  * @brief  Create a new thread
213  *
214  * This function creates a new thread.  The thread is initially suspended, so you
215  * need to call thread_resume() to execute it.
216  *
217  * @param  t           If NULL, allocate a new thread_t; otherwise reuse the caller-supplied struct
218  * @param  name        Name of thread
219  * @param  entry       Entry point of thread
220  * @param  arg         Arbitrary argument passed to entry()
221  * @param  priority    Execution priority for the thread
222  * @param  stack_size  Stack size for the thread
223  * @param  shadow_stack_size  Shadow stack size for the thread, if enabled
224  *
225  * Thread priority is an integer from 0 (lowest) to 31 (highest).  Some standard
226  * priorities are defined in <kernel/thread.h>:
227  *
228  *  HIGHEST_PRIORITY
229  *  DPC_PRIORITY
230  *  HIGH_PRIORITY
231  *  DEFAULT_PRIORITY
232  *  LOW_PRIORITY
233  *  IDLE_PRIORITY
234  *  LOWEST_PRIORITY
235  *
236  * Stack size is typically set to DEFAULT_STACK_SIZE
237  *
238  * @return  Pointer to thread object, or NULL on failure.
239  */
240 thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine entry, void *arg, int priority, void *stack, size_t stack_size, size_t shadow_stack_size)
241 {
242     int ret;
243     unsigned int flags = 0;
244 
245     ASSERT(priority > IDLE_PRIORITY);
246     ASSERT(priority <= HIGHEST_PRIORITY);
247 
248     if (!t) {
249         t = malloc(sizeof(thread_t));
250         if (!t)
251             return NULL;
252         flags |= THREAD_FLAG_FREE_STRUCT;
253     }
254 
255     init_thread_struct(t, name);
256 
257     t->entry = entry;
258     t->arg = arg;
259     t->priority = priority;
260     t->state = THREAD_SUSPENDED;
261     t->blocking_wait_queue = NULL;
262     t->wait_queue_block_ret = NO_ERROR;
263     thread_set_curr_cpu(t, -1);
264 
265     t->retcode = 0;
266     wait_queue_init(&t->retcode_wait_queue);
267 
268 #if WITH_KERNEL_VM
269     t->aspace = NULL;
270 #endif
271 
272     /* create the stack */
273     if (!stack) {
274         ret = vmm_alloc(vmm_get_kernel_aspace(), "kernel-stack", stack_size,
275                         &t->stack, 0, 0, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
276         if (ret) {
277             if (flags & THREAD_FLAG_FREE_STRUCT)
278                 free(t);
279             return NULL;
280         }
281         flags |= THREAD_FLAG_FREE_STACK;
282     } else {
283         t->stack = stack;
284     }
285 #if THREAD_STACK_HIGHWATER
286     memset(t->stack, STACK_DEBUG_BYTE, stack_size);
287 #endif
288 
289     t->stack_high = t->stack + stack_size;
290     t->stack_size = stack_size;
291 
292 #if KERNEL_SCS_ENABLED
293     /* shadow stacks can only store an integral number of return addresses */
294     t->shadow_stack_size = round_up(shadow_stack_size, sizeof(vaddr_t));
295     ret = vmm_alloc(vmm_get_kernel_aspace(), "kernel-shadow-stack",
296                     t->shadow_stack_size, &t->shadow_stack, PAGE_SIZE_SHIFT,
297                     0, ARCH_MMU_FLAG_PERM_NO_EXECUTE);
298     if (ret) {
299         if (flags & THREAD_FLAG_FREE_STACK)
300             free(t->stack);
301         if (flags & THREAD_FLAG_FREE_STRUCT)
302             free(t);
303         return NULL;
304     }
305     flags |= THREAD_FLAG_FREE_SHADOW_STACK;
306 
307     t->shadow_stack = adjust_shadow_stack_base(t->shadow_stack,
308                                                t->shadow_stack_size);
309 #endif
310 
311     /* save whether or not we need to free the thread struct and/or stack */
312     t->flags = flags;
313 
314     /* inherit thread local storage from the parent */
315     thread_t *current_thread = get_current_thread();
316     int i;
317     for (i=0; i < MAX_TLS_ENTRY; i++)
318         t->tls[i] = current_thread->tls[i];
319 
320 #if LK_LIBC_IMPLEMENTATION_IS_MUSL
321     /* set up thread-local libc info */
322     ret = libc_state_thread_init(t);
323     if (ret != NO_ERROR) {
324 #if KERNEL_SCS_ENABLED
325         if (flags & THREAD_FLAG_FREE_SHADOW_STACK) {
326             free(t->shadow_stack);
327         }
328 #endif
329         if (flags & THREAD_FLAG_FREE_STACK) {
330             free(t->stack);
331         }
332         if (flags & THREAD_FLAG_FREE_STRUCT) {
333             free(t);
334         }
335         return NULL;
336     }
337     flags |= THREAD_FLAG_FREE_LIBC_STATE;
338 #endif
339 
340     /* set up the initial stack frame */
341     arch_thread_initialize(t);
342 
343     /* add it to the global thread list */
344     THREAD_LOCK(state);
345     list_add_head(&thread_list, &t->thread_list_node);
346     THREAD_UNLOCK(state);
347 
348     return t;
349 }
350 
351 thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
352 {
353     return thread_create_etc(NULL, name, entry, arg, priority, NULL,
354                              stack_size, DEFAULT_SHADOW_STACK_SIZE);
355 }
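
/*
 * Usage sketch (illustrative only; worker_routine and its argument are
 * hypothetical): create a thread, then start it with thread_resume() or
 * thread_detach_and_resume().
 *
 *   static int worker_routine(void *arg)
 *   {
 *       dprintf(INFO, "worker running, arg %p\n", arg);
 *       return 0;
 *   }
 *
 *   static void start_worker(void)
 *   {
 *       thread_t *t = thread_create("worker", worker_routine, NULL,
 *                                   DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *       if (!t)
 *           return;
 *       thread_detach_and_resume(t);
 *   }
 */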
356 
357 /**
358  * @brief Flag a thread as real time
359  *
360  * @param t Thread to flag
361  *
362  * @return NO_ERROR on success
363  */
364 status_t thread_set_real_time(thread_t *t)
365 {
366     if (!t)
367         return ERR_INVALID_ARGS;
368 
369     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
370 
371     THREAD_LOCK(state);
372     if (t == get_current_thread()) {
373         /* if we're currently running, cancel the preemption timer. */
374         timer_cancel(&preempt_timer[arch_curr_cpu_num()]);
375     }
376     t->flags |= THREAD_FLAG_REAL_TIME;
377     THREAD_UNLOCK(state);
378 
379     return NO_ERROR;
380 }
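
/*
 * Usage sketch (illustrative; the entry routine is hypothetical): a real-time
 * thread is normally flagged before it is resumed, so it never runs with the
 * preemption tick armed. Note the flag only affects scheduling for priorities
 * above DEFAULT_PRIORITY (see thread_is_realtime()).
 *
 *   thread_t *t = thread_create("audio-pump", audio_pump_routine, NULL,
 *                               HIGH_PRIORITY, DEFAULT_STACK_SIZE);
 *   if (t) {
 *       thread_set_real_time(t);
 *       thread_resume(t);
 *   }
 */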
381 
382 static bool thread_is_realtime(thread_t *t)
383 {
384     return (t->flags & THREAD_FLAG_REAL_TIME) && t->priority > DEFAULT_PRIORITY;
385 }
386 
387 static bool thread_is_idle(thread_t *t)
388 {
389     return !!(t->flags & THREAD_FLAG_IDLE);
390 }
391 
392 static bool thread_is_real_time_or_idle(thread_t *t)
393 {
394     return !!(t->flags & (THREAD_FLAG_REAL_TIME | THREAD_FLAG_IDLE));
395 }
396 
397 static mp_cpu_mask_t thread_get_mp_reschedule_target(thread_t *current_thread, thread_t *t)
398 {
399 #if WITH_SMP
400     uint cpu = arch_curr_cpu_num();
401     uint target_cpu;
402 
403     if (t->pinned_cpu != -1 && current_thread->pinned_cpu == t->pinned_cpu)
404         return 0;
405 
406     if (t->pinned_cpu == -1 || (uint)t->pinned_cpu == cpu)
407         return 0;
408 
409     target_cpu = (uint)t->pinned_cpu;
410 
411     ASSERT(target_cpu < SMP_MAX_CPUS);
412 
413     if (t->priority < cpu_priority[target_cpu]) {
414         /*
415          * The thread is pinned to a cpu that is already running, or has already
416          * been signalled to run, a higher priority thread. No ipi is needed.
417          */
418 #if DEBUG_THREAD_CPU_WAKE
419         dprintf(ALWAYS, "%s: cpu %d, don't wake cpu %d, priority %d for priority %d thread (current priority %d)\n",
420             __func__, cpu, target_cpu, cpu_priority[target_cpu], t->priority, current_thread->priority);
421 #endif
422         return 0;
423     }
424 
425 #if DEBUG_THREAD_CPU_WAKE
426     dprintf(ALWAYS, "%s: cpu %d, wake cpu %d, priority %d for priority %d thread (current priority %d)\n",
427         __func__, cpu, target_cpu, cpu_priority[target_cpu], t->priority, current_thread->priority);
428 #endif
429     /*
430      * Pretend the target CPU is already running the thread so we don't send it
431      * another ipi for a lower priority thread. This is most important if that
432      * thread can run on another CPU instead.
433      */
434     cpu_priority[target_cpu] = t->priority;
435     platform_cpu_priority_set(target_cpu, t->priority);
436 
437     return 1UL << target_cpu;
438 #else
439     return 0;
440 #endif
441 }
442 
443 static void thread_mp_reschedule(thread_t *current_thread, thread_t *t)
444 {
445     mp_reschedule(thread_get_mp_reschedule_target(current_thread, t), 0);
446 }
447 
448 static void thread_init_cookie(thread_t *t) {
449     int rc = rand_get_bytes((uint8_t *)&t->cookie, sizeof(t->cookie));
450     /* tie the expected cookie to the address of the enclosing thread */
451     t->cookie ^= (uint64_t)t;
452     ASSERT(!rc && "Failed to initialize thread cookie.");
453 }
454 
455 static void thread_check_cookie(const thread_t *t) {
456     const uint64_t expected_cookie = thread_get_expected_cookie(t);
457 
458     if (unlikely(t->cookie != expected_cookie)) {
459         /*
460          * It is not safe to call panic with interrupts enabled because
461          * backtracing calls printf which may block the current thread and put
462          * it on the runqueues with an invalid cookie - causing a double panic.
463          */
464         arch_disable_ints();
465         panic("Corrupt or invalid thread cookie detected for thread: %s.\n"
466               "Expected cookie: %" PRIu64 ", actual cookie: %" PRIu64 "\n",
467               t->name,
468               expected_cookie,
469               t->cookie
470         );
471     }
472 }
473 
474 /**
475  * @brief  Make a suspended thread executable.
476  *
477  * This function is typically called to start a thread which has just been
478  * created with thread_create()
479  *
480  * @param t  Thread to resume
481  *
482  * @return NO_ERROR on success, ERR_NOT_SUSPENDED if thread was not suspended.
483  */
484 status_t thread_resume(thread_t *t)
485 {
486     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
487     DEBUG_ASSERT(t->state != THREAD_DEATH);
488 
489     thread_check_cookie(t);
490 
491     bool resched = false;
492     bool ints_disabled = arch_ints_disabled();
493     THREAD_LOCK(state);
494     if (t->state == THREAD_SUSPENDED) {
495         t->state = THREAD_READY;
496         insert_in_run_queue_head(t);
497         if (!ints_disabled) /* HACK, don't resched into bootstrap thread before idle thread is set up */
498             resched = true;
499     }
500 
501     thread_mp_reschedule(get_current_thread(), t);
502 
503     THREAD_UNLOCK(state);
504 
505     if (resched)
506         thread_yield();
507 
508     return NO_ERROR;
509 }
510 
511 status_t thread_detach_and_resume(thread_t *t)
512 {
513     status_t err;
514     err = thread_detach(t);
515     if (err < 0)
516         return err;
517     return thread_resume(t);
518 }
519 
520 static void thread_free(thread_t *t)
521 {
522 #if LK_LIBC_IMPLEMENTATION_IS_MUSL
523     int ret;
524 
525     /* free thread-local libc info */
526     if (t->flags & THREAD_FLAG_FREE_LIBC_STATE) {
527         ret = libc_state_thread_free(t);
528         DEBUG_ASSERT(ret == NO_ERROR);
529     }
530 #endif
531 
532     /* free its stack and the thread structure itself */
533     if (t->flags & THREAD_FLAG_FREE_STACK && t->stack)
534         vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)t->stack);
535 
536 #if KERNEL_SCS_ENABLED
537     if (t->flags & THREAD_FLAG_FREE_SHADOW_STACK) {
538         /* each thread has a shadow stack when the mitigation is enabled */
539         DEBUG_ASSERT(t->shadow_stack);
540         /* get back the pointer returned by vmm_alloc by undoing adjustment */
541         t->shadow_stack = revert_shadow_stack_base(t->shadow_stack,
542                                                    t->shadow_stack_size);
543         vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)t->shadow_stack);
544     }
545 #endif
546 
547     if (t->flags & THREAD_FLAG_FREE_STRUCT) {
548         free(t);
549     }
550 }
551 
552 status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout)
553 {
554     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
555 
556     THREAD_LOCK(state);
557 
558     if (t->flags & THREAD_FLAG_DETACHED) {
559         /* the thread is detached, go ahead and exit */
560         THREAD_UNLOCK(state);
561         return ERR_THREAD_DETACHED;
562     }
563 
564     /* wait for the thread to die */
565     if (t->state != THREAD_DEATH) {
566         status_t err = wait_queue_block(&t->retcode_wait_queue, timeout);
567         if (err < 0) {
568             THREAD_UNLOCK(state);
569             return err;
570         }
571     }
572 
573     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
574     DEBUG_ASSERT(t->state == THREAD_DEATH);
575     DEBUG_ASSERT(t->blocking_wait_queue == NULL);
576     DEBUG_ASSERT(!list_in_list(&t->queue_node));
577 
578     /* save the return code */
579     if (retcode)
580         *retcode = t->retcode;
581 
582     /* remove it from the master thread list */
583     list_delete(&t->thread_list_node);
584 
585     /* clear the structure's magic */
586     t->magic = 0;
587 
588     THREAD_UNLOCK(state);
589 
590     thread_free(t);
591 
592     return NO_ERROR;
593 }
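
/*
 * Usage sketch (illustrative; oneshot_routine is hypothetical): run a worker
 * to completion and collect its return code. A successful thread_join() also
 * frees the thread, so t must not be touched afterwards.
 *
 *   int retcode;
 *   thread_t *t = thread_create("oneshot", oneshot_routine, NULL,
 *                               DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *   if (t) {
 *       thread_resume(t);
 *       if (thread_join(t, &retcode, INFINITE_TIME) == NO_ERROR)
 *           dprintf(INFO, "worker exited with %d\n", retcode);
 *   }
 */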
594 
595 status_t thread_detach(thread_t *t)
596 {
597     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
598 
599     THREAD_LOCK(state);
600 
601     /* if another thread is blocked inside thread_join() on this thread,
602      * wake them up with a specific return code */
603     wait_queue_wake_all(&t->retcode_wait_queue, false, ERR_THREAD_DETACHED);
604 
605     /* if it's already dead, then just do what join would have and exit */
606     if (t->state == THREAD_DEATH) {
607         t->flags &= ~THREAD_FLAG_DETACHED; /* makes sure thread_join continues */
608         THREAD_UNLOCK(state);
609         return thread_join(t, NULL, 0);
610     } else {
611         t->flags |= THREAD_FLAG_DETACHED;
612         THREAD_UNLOCK(state);
613         return NO_ERROR;
614     }
615 }
616 
617 static int reaper_thread_routine(void *arg)
618 {
619     THREAD_LOCK(state);
620     while (true) {
621         wait_queue_block(&reaper_wait_queue, INFINITE_TIME);
622 
623         while(true) {
624             thread_t *t = list_remove_head_type(&dead_threads,
625                                                 thread_t, thread_list_node);
626             if (!t) {
627                 break;
628             }
629             /* clear the structure's magic */
630             t->magic = 0;
631             THREAD_UNLOCK(state);
632             thread_free(t);
633             THREAD_LOCK(state);
634         }
635     }
636 }
637 
638 /**
639  * @brief  Terminate the current thread
640  *
641  * Current thread exits with the specified return code.
642  *
643  * This function does not return.
644  */
645 void thread_exit(int retcode)
646 {
647     thread_t *current_thread = get_current_thread();
648 
649     DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
650     DEBUG_ASSERT(!thread_is_idle(current_thread));
651     DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
652 
653     thread_check_cookie(current_thread);
654 
655 //  dprintf("thread_exit: current %p\n", current_thread);
656 
657     THREAD_LOCK(state);
658 
659     /* enter the dead state */
660     current_thread->state = THREAD_DEATH;
661     current_thread->retcode = retcode;
662 
663     /* if we're detached, then do our teardown here */
664     if (current_thread->flags & THREAD_FLAG_DETACHED) {
665         /* remove it from the master thread list */
666         list_delete(&current_thread->thread_list_node);
667 
668         /* add it to list of threads to free and wake up reaper */
669         list_add_tail(&dead_threads, &current_thread->thread_list_node);
670         wait_queue_wake_all(&reaper_wait_queue, false, 0);
671     } else {
672         /* signal if anyone is waiting */
673         wait_queue_wake_all(&current_thread->retcode_wait_queue, false, 0);
674     }
675 
676     /* reschedule */
677     thread_resched();
678 
679     panic("somehow fell through thread_exit()\n");
680 }
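
/*
 * Usage sketch (illustrative; selftest_routine is hypothetical): a thread can
 * end itself explicitly with thread_exit(); the code passed here is what a
 * joiner later receives from thread_join().
 *
 *   static int selftest_routine(void *arg)
 *   {
 *       if (!arg)
 *           thread_exit(ERR_INVALID_ARGS); // does not return
 *       // ... do the actual work ...
 *       return 0;
 *   }
 */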
681 
682 /**
683  * @brief  Terminate the current thread due to a panic
684  *
685  * Threads should not call this method directly. Instead, test threads can set
686  * the exit-on-panic flag to exit via this function rather than halting the
687  * system. This function should not be used outside of test builds.
688  *
689  * This function is similar to thread_exit insofar as it does not return.
690  */
691 void thread_exit_from_panic(void) {
692     thread_t *current_thread = get_current_thread();
693 
694     DEBUG_ASSERT(current_thread->flags & THREAD_FLAG_EXIT_ON_PANIC);
695 
696     /* thread lock, if held, was released in _panic */
697     THREAD_LOCK(state);
698 
699     /* prevent potential infinite recursion in case we panic again */
700     current_thread->flags &= ~THREAD_FLAG_EXIT_ON_PANIC;
701 
702     DEBUG_ASSERT(current_thread->state == THREAD_RUNNING ||
703                  current_thread->state == THREAD_READY);
704 
705 #if TEST_BUILD
706     /* ensure thread cookie is valid so we don't panic in thread_resched */
707     current_thread->cookie = thread_get_expected_cookie(current_thread);
708 #endif
709     /*
710      * Threads are typically in the THREAD_RUNNING state when they exit.
711      * However, if a thread with the THREAD_FLAG_EXIT_ON_PANIC flag set fails
712      * a cookie check in thread_resched, this function may run while the
713      * thread is still on the run queue with its state set to THREAD_READY.
714      *
715      * In test builds: Remove it from the run queue so it does not get
716      * rescheduled and to ensure that a thread_join operation on the
717      * panic'ed thread completes successfully. Set its state to THREAD_READY.
718      *
719      * Non-test builds shouldn't exercise this code path so we panic.
720      */
721     if (current_thread->state == THREAD_READY) {
722 #if TEST_BUILD
723         delete_from_run_queue(current_thread);
724         /* lie about the thread state to avoid tripping assert in thread_exit */
725         current_thread->state = THREAD_RUNNING;
726 #else
727         /* thread lock will be released in _panic */
728         panic("%s: tried to exit runnable kernel test thread", __func__);
729 #endif
730     }
731 
732     THREAD_UNLOCK(state);
733 
734     thread_exit(ERR_FAULT);
735 }
736 
737 __WEAK void platform_idle(void)
738 {
739     arch_idle();
740 }
741 
742 static void idle_thread_routine(void)
743 {
744     for (;;)
745         platform_idle();
746 }
747 
748 static thread_t *get_top_thread(int cpu, bool unlink)
749 {
750     thread_t *newthread;
751     uint32_t local_run_queue_bitmap = run_queue_bitmap;
752 
753     while (local_run_queue_bitmap) {
754         /* find the first (remaining) queue with a thread in it */
755         uint next_queue = sizeof(run_queue_bitmap) * 8 - 1 - __builtin_clz(local_run_queue_bitmap);
756 
757         list_for_every_entry(&run_queue[next_queue], newthread, thread_t, queue_node) {
758 #if WITH_SMP
759             if (newthread->pinned_cpu < 0 || newthread->pinned_cpu == cpu)
760 #endif
761             {
762                 if (unlink) {
763                     delete_from_run_queue(newthread);
764                 }
765 
766                 return newthread;
767             }
768         }
769 
770         local_run_queue_bitmap &= ~(1U<<next_queue);
771     }
772 
773     /* No threads to run */
774     if (cpu < 0) {
775         /* no CPU has been selected, so we don't have an idle thread */
776         return NULL;
777     } else {
778         /* select the idle thread for this cpu */
779         return idle_thread(cpu);
780     }
781 }
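
/*
 * Worked example (illustrative): with run_queue_bitmap == 0x00010022, bits 16,
 * 5 and 1 are set; __builtin_clz() returns 15, so next_queue starts at
 * 32 - 1 - 15 == 16, the highest-priority non-empty queue, and the loop walks
 * down from there as bits are cleared from the local copy.
 */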
782 
783 /**
784  * thread_pinned_cond_mp_reschedule() - handles a new pinned cpu
785  * when a thread is running or becomes ready.
786  * @current_thread:    Thread currently running on the cpu
787  * @thread:            Thread to be scheduled on a potentially
788  *                     updated pinned cpu
789  *
790  * When the pinned cpu of a thread is changed, the thread needs
791  * to be rescheduled on that new cpu.
792  *
793  * To achieve this, thread_resched() shall be invoked on the currently
794  * running cpu. Within thread_resched the pinned cpu can be checked against
795  * the current cpu and if different, an IPI shall be triggered on the
796  * new cpu. thread_pinned_cond_mp_reschedule() is the helper function
797  * invoked by thread_resched, checking above condition and conditionally
798  * invoking thread_mp_reschedule() to trigger the IPI.
799  *
800  * Notes:
801  * - If the thread is updating its own pinned cpu state,
802  * thread_set_pinned_cpu() invokes thread_preempt(), which invokes
803  * thread_resched() with current_thread set to the running thread.
804  * - If the thread is suspended/blocked/ready when its pinned cpu is updated,
805  * as soon as it transitions to ready, thread_resched() is invoked with
806  * current_thread set to the ready thread.
807  * - If the thread is sleeping when its pinned cpu is updated,
808  * thread_sleep_handler() is invoked on the cpu the thread went to sleep on.
809  * thread_sleep_handler() needs to invoke thread_pinned_cond_mp_reschedule()
810  * to trigger the IPI on the new pinned cpu.
811  */
812 static void thread_pinned_cond_mp_reschedule(thread_t* current_thread,
813                                              thread_t* thread,
814                                              uint cpu) {
815 #if WITH_SMP
816     if (unlikely((thread->pinned_cpu > -1) && (thread->pinned_cpu != (int)cpu))) {
817         DEBUG_ASSERT(thread->curr_cpu == (int)cpu || thread->curr_cpu == -1);
818 #if DEBUG_THREAD_CPU_PIN
819         dprintf(ALWAYS,
820                 "%s: arch_curr_cpu %d, thread %s: pinned_cpu %d, curr_cpu %d, state [%s]\n",
821                 __func__, arch_curr_cpu_num(), thread->name, thread->pinned_cpu,
822                 thread->curr_cpu, thread_state_to_str(thread->state));
823 #endif
824         thread_mp_reschedule(current_thread, thread);
825     }
826 #endif
827 }
828 
829 static void thread_cond_mp_reschedule(thread_t *current_thread, const char *caller)
830 {
831 #if WITH_SMP
832     int i;
833     uint best_cpu = ~0U;
834     int best_cpu_priority = INT_MAX;
835     thread_t *t = get_top_thread(-1, false);
836 
837     DEBUG_ASSERT(arch_ints_disabled());
838     DEBUG_ASSERT(thread_lock_held());
839 
840     for (i = 0; i < SMP_MAX_CPUS; i++) {
841         if (!mp_is_cpu_active(i))
842             continue;
843 
844         if (cpu_priority[i] < best_cpu_priority) {
845             best_cpu = i;
846             best_cpu_priority = cpu_priority[i];
847         }
848     }
849 
850     if (!t || (t->priority <= best_cpu_priority))
851         return;
852 
853 #if DEBUG_THREAD_CPU_WAKE
854     dprintf(ALWAYS, "%s from %s: cpu %d, wake cpu %d, priority %d for priority %d thread (%s), current %d (%s)\n",
855             __func__, caller, arch_curr_cpu_num(), best_cpu, best_cpu_priority,
856             t->priority, t->name,
857             current_thread->priority, current_thread->name);
858 #endif
859     cpu_priority[best_cpu] = t->priority;
860     platform_cpu_priority_set(best_cpu, t->priority);
861     mp_reschedule(1UL << best_cpu, 0);
862 #endif
863 }
864 
865 /**
866  * @brief  Cause another thread to be executed.
867  *
868  * Internal reschedule routine. The current thread needs to already be in whatever
869  * state and queues it needs to be in. This routine simply picks the next thread and
870  * switches to it.
871  *
872  * This is probably not the function you're looking for. See
873  * thread_yield() instead.
874  */
875 static void thread_resched(void) {
876     thread_t *oldthread;
877     thread_t *newthread;
878 
879     thread_t *current_thread = get_current_thread();
880     uint cpu = arch_curr_cpu_num();
881 
882     DEBUG_ASSERT(arch_ints_disabled());
883     DEBUG_ASSERT(thread_lock_held());
884     DEBUG_ASSERT(current_thread->state != THREAD_RUNNING);
885 
886     THREAD_STATS_INC(reschedules);
887 
888     /*
889      * These threads are already on the runqueues so these checks should pass
890      * unless an existing thread struct was corrupted. If that is the case, the
891      * thread cookies do not help much as an adversary could corrupt the
892      * register file for that thread instead.
893      */
894     thread_check_cookie(current_thread);
895 
896     newthread = get_top_thread(cpu, true);
897 
898     /*
899      * The current_thread is switched out from a given cpu,
900      * however its pinned cpu may have changed and if so,
901      * this current_thread should be scheduled on that new cpu.
902      */
903     thread_pinned_cond_mp_reschedule(newthread, current_thread, cpu);
904 
905     DEBUG_ASSERT(newthread);
906 
907     newthread->state = THREAD_RUNNING;
908 
909     oldthread = current_thread;
910 
911     if (newthread == oldthread) {
912         if (cpu_priority[cpu] != oldthread->priority) {
913             /*
914              * When we try to wake up a CPU to run a specific thread, we record
915              * the priority of that thread so we don't request the same CPU
916              * again for a lower priority thread. If another CPU picks up that
917              * thread before the CPU we sent the wake-up IPI to gets to the
918              * scheduler, it may take the early return path here. Reset this
919              * priority value before returning.
920              */
921 #if DEBUG_THREAD_CPU_WAKE
922             dprintf(ALWAYS, "%s: cpu %d, reset cpu priority %d -> %d\n",
923                 __func__, cpu, cpu_priority[cpu], newthread->priority);
924 #endif
925             cpu_priority[cpu] = newthread->priority;
926             platform_cpu_priority_set(cpu, newthread->priority);
927         }
928         return;
929     }
930 
931     /* set up quantum for the new thread if it was consumed */
932     if (newthread->remaining_quantum <= 0) {
933         newthread->remaining_quantum = 5; // XXX make this smarter
934     }
935 
936     /* mark the cpu ownership of the threads */
937     thread_set_curr_cpu(oldthread, -1);
938     thread_set_curr_cpu(newthread, cpu);
939 
940 #if WITH_SMP
941     if (thread_is_idle(newthread)) {
942         mp_set_cpu_idle(cpu);
943     } else {
944         mp_set_cpu_busy(cpu);
945     }
946 
947     if (thread_is_realtime(newthread)) {
948         mp_set_cpu_realtime(cpu);
949     } else {
950         mp_set_cpu_non_realtime(cpu);
951     }
952 #endif
953 
954 #if THREAD_STATS
955     THREAD_STATS_INC(context_switches);
956 
957     if (thread_is_idle(oldthread)) {
958         lk_time_ns_t now = current_time_ns();
959         thread_stats[cpu].idle_time += now - thread_stats[cpu].last_idle_timestamp;
960     }
961     if (thread_is_idle(newthread)) {
962         thread_stats[cpu].last_idle_timestamp = current_time_ns();
963     }
964 #endif
965 
966     KEVLOG_THREAD_SWITCH(oldthread, newthread);
967 
968     if (thread_is_real_time_or_idle(newthread)) {
969         thread_cond_mp_reschedule(newthread, __func__);
970         if (!thread_is_real_time_or_idle(oldthread)) {
971             /* if we're switching from a non real time to a real time, cancel
972              * the preemption timer. */
973 #if DEBUG_THREAD_CONTEXT_SWITCH
974             dprintf(ALWAYS, "arch_context_switch: stop preempt, cpu %d, old %p (%s), new %p (%s)\n",
975                     cpu, oldthread, oldthread->name, newthread, newthread->name);
976 #endif
977             timer_cancel(&preempt_timer[cpu]);
978         }
979     } else if (thread_is_real_time_or_idle(oldthread)) {
980         /* if we're switching from a real time (or idle thread) to a regular one,
981          * set up a periodic timer to run our preemption tick. */
982 #if DEBUG_THREAD_CONTEXT_SWITCH
983         dprintf(ALWAYS, "arch_context_switch: start preempt, cpu %d, old %p (%s), new %p (%s)\n",
984                 cpu, oldthread, oldthread->name, newthread, newthread->name);
985 #endif
986         timer_set_periodic_ns(&preempt_timer[cpu], MS2NS(10),
987                               thread_timer_callback, NULL);
988     }
989 
990     /* set some optional target debug leds */
991     target_set_debug_led(0, !thread_is_idle(newthread));
992 
993     /* do the switch */
994     cpu_priority[cpu] = newthread->priority;
995     platform_cpu_priority_set(cpu, newthread->priority);
996     set_current_thread(newthread);
997 
998     thread_check_cookie(newthread);
999 
1000 #if DEBUG_THREAD_CONTEXT_SWITCH
1001     dprintf(ALWAYS, "arch_context_switch: cpu %d, old %p (%s, pri %d, flags 0x%x), new %p (%s, pri %d, flags 0x%x)\n",
1002             cpu, oldthread, oldthread->name, oldthread->priority,
1003             oldthread->flags, newthread, newthread->name,
1004             newthread->priority, newthread->flags);
1005 #endif
1006 
1007 #if WITH_KERNEL_VM
1008     /* see if we need to swap mmu context */
1009     if (newthread->aspace != oldthread->aspace) {
1010         vmm_context_switch(oldthread->aspace, newthread->aspace);
1011     }
1012 #endif
1013 
1014     /* do the low level context switch */
1015     arch_context_switch(oldthread, newthread);
1016 }
1017 
1018 /**
1019  * @brief Yield the cpu to another thread
1020  *
1021  * This function places the current thread at the end of the run queue
1022  * and yields the cpu to another waiting thread (if any.)
1023  *
1024  * This function will return at some later time. Possibly immediately if
1025  * no other threads are waiting to execute.
1026  */
1027 void thread_yield(void)
1028 {
1029     thread_t *current_thread = get_current_thread();
1030 
1031     DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
1032     DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
1033 
1034     THREAD_LOCK(state);
1035 
1036     THREAD_STATS_INC(yields);
1037 
1038     /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
1039     current_thread->state = THREAD_READY;
1040     current_thread->remaining_quantum = 0;
1041     if (likely(!thread_is_idle(current_thread))) { /* idle thread doesn't go in the run queue */
1042         insert_in_run_queue_tail(current_thread);
1043     }
1044     thread_resched();
1045 
1046     THREAD_UNLOCK(state);
1047 }
1048 
1049 /**
1050  * @brief  Briefly yield cpu to another thread
1051  *
1052  * This function is similar to thread_yield(), except that it will
1053  * restart more quickly.
1054  *
1055  * This function places the current thread at the head of the run
1056  * queue and then yields the cpu to another thread.
1057  *
1058  * Exception:  If the time slice for this thread has expired, then
1059  * the thread goes to the end of the run queue.
1060  *
1061  * This function will return at some later time. Possibly immediately if
1062  * no other threads are waiting to execute.
1063  */
1064 static void thread_preempt_inner(bool lock_held)
1065 {
1066     thread_t *current_thread = get_current_thread();
1067 
1068     DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
1069     DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
1070 
1071 #if THREAD_STATS
1072     if (!thread_is_idle(current_thread))
1073         THREAD_STATS_INC(preempts); /* only track when a meaningful preempt happens */
1074 #endif
1075 
1076     KEVLOG_THREAD_PREEMPT(current_thread);
1077 
1078     spin_lock_saved_state_t state;
1079     if (!lock_held) {
1080         /* thread lock */
1081         spin_lock_irqsave(&thread_lock, state);
1082         thread_lock_complete();
1083     }
1084 
1085     /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
1086     current_thread->state = THREAD_READY;
1087     if (likely(!thread_is_idle(current_thread))) { /* idle thread doesn't go in the run queue */
1088         if (current_thread->remaining_quantum > 0)
1089             insert_in_run_queue_head(current_thread);
1090         else
1091             insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
1092     }
1093     thread_resched();
1094 
1095     if (!lock_held) {
1096         THREAD_UNLOCK(state);
1097     }
1098 }
1099 
1100 void thread_preempt(void)
1101 {
1102     /*
1103      * We refrain from asserting that the thread lock is not held due to
1104      * performance concerns as this legacy function is called frequently and
1105      * its usage context is not updated.
1106      * DEBUG_ASSERT(!thread_lock_held());
1107      */
1108     thread_preempt_inner(false);
1109 }
1110 
1111 void thread_preempt_lock_held(void)
1112 {
1113     DEBUG_ASSERT(thread_lock_held());
1114     thread_preempt_inner(true);
1115 }
1116 
1117 /**
1118  * @brief  Suspend thread until woken.
1119  *
1120  * This function schedules another thread to execute.  This function does not
1121  * return until the thread is made runnable again by some other module.
1122  *
1123  * You probably don't want to call this function directly; it's meant to be called
1124  * from other modules, such as mutex, which will presumably set the thread's
1125  * state to blocked and add it to some queue or another.
1126  */
1127 void thread_block(void)
1128 {
1129     __UNUSED thread_t *current_thread = get_current_thread();
1130 
1131     DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
1132     DEBUG_ASSERT(current_thread->state == THREAD_BLOCKED);
1133     DEBUG_ASSERT(thread_lock_held());
1134     DEBUG_ASSERT(!thread_is_idle(current_thread));
1135 
1136     /* we are blocking on something. the blocking code should have already stuck us on a queue */
1137     thread_resched();
1138 }
1139 
1140 void thread_unblock(thread_t *t, bool resched)
1141 {
1142     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
1143     DEBUG_ASSERT(t->state == THREAD_BLOCKED);
1144     DEBUG_ASSERT(thread_lock_held());
1145     DEBUG_ASSERT(!thread_is_idle(t));
1146 
1147     thread_check_cookie(t);
1148 
1149     t->state = THREAD_READY;
1150     insert_in_run_queue_head(t);
1151     thread_mp_reschedule(get_current_thread(), t);
1152     if (resched)
1153         thread_resched();
1154 }
1155 
1156 static enum handler_return thread_timer_callback(struct timer *t, lk_time_ns_t now,
1157                                               void *arg)
1158 {
1159     thread_t *current_thread = get_current_thread();
1160 
1161     if (thread_is_idle(current_thread))
1162         return INT_NO_RESCHEDULE;
1163 
1164     THREAD_LOCK(state);
1165     thread_cond_mp_reschedule(current_thread, __func__);
1166     THREAD_UNLOCK(state);
1167 
1168     if (thread_is_real_time_or_idle(current_thread))
1169         return INT_NO_RESCHEDULE;
1170 
1171     current_thread->remaining_quantum--;
1172     if (current_thread->remaining_quantum <= 0) {
1173         return INT_RESCHEDULE;
1174     } else {
1175         return INT_NO_RESCHEDULE;
1176     }
1177 }
1178 
1179 /* timer callback to wake up a sleeping thread */
1180 static enum handler_return thread_sleep_handler(timer_t *timer,
1181                                                 lk_time_ns_t now, void *arg)
1182 {
1183     thread_t *t = (thread_t *)arg;
1184 
1185     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
1186     DEBUG_ASSERT(t->state == THREAD_SLEEPING);
1187 
1188     thread_check_cookie(t);
1189 
1190     THREAD_LOCK(state);
1191 
1192     t->state = THREAD_READY;
1193     insert_in_run_queue_head(t);
1194     /*
1195      * The awakened thread's thread_sleep_handler() is invoked
1196      * on the cpu the thread went to sleep on.
1197      * However the thread's pinned cpu may have changed
1198      * while the thread was asleep and if so,
1199      * this thread should be scheduled on the new pinned cpu.
1200      */
1201     thread_pinned_cond_mp_reschedule(
1202         get_current_thread(), t, arch_curr_cpu_num());
1203     THREAD_UNLOCK(state);
1204 
1205     return INT_RESCHEDULE;
1206 }
1207 
1208 /**
1209  * @brief  Put thread to sleep; delay specified in ns
1210  *
1211  * This function puts the current thread to sleep until the specified
1212  * delay in ns has expired.
1213  *
1214  * Note that this function could sleep for longer than the specified delay if
1215  * other threads are running.  When the timer expires, this thread will
1216  * be placed at the head of the run queue.
1217  */
1218 void thread_sleep_ns(lk_time_ns_t delay_ns)
1219 {
1220     timer_t timer;
1221 
1222     thread_t *current_thread = get_current_thread();
1223 
1224     DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
1225     DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
1226     DEBUG_ASSERT(!thread_is_idle(current_thread));
1227 
1228     timer_initialize(&timer);
1229 
1230     THREAD_LOCK(state);
1231     timer_set_oneshot_ns(&timer, delay_ns, thread_sleep_handler,
1232                          (void *)current_thread);
1233     current_thread->state = THREAD_SLEEPING;
1234     thread_resched();
1235     THREAD_UNLOCK(state);
1236 
1237     /*
1238      * Make sure callback is not still running before timer goes out of scope as
1239      * it would corrupt the stack.
1240      */
1241     timer_cancel_sync(&timer);
1242 }
1243 
1244 /**
1245  * thread_sleep_until_ns - Put thread to sleep until specified time
1246  * @target_time_ns:  Time to sleep until.
1247  *
1248  * Sleep until current_time_ns() returns a value greater or equal than
1249  * @target_time_ns. If current_time_ns() is already greater or equal than
1250  * @target_time_ns return immediately.
1251  */
1252 void thread_sleep_until_ns(lk_time_ns_t target_time_ns)
1253 {
1254     lk_time_ns_t now_ns = current_time_ns();
1255     if (now_ns < target_time_ns) {
1256         /* TODO: Support absolute time in timer api and improve accuracy. */
1257         thread_sleep_ns(target_time_ns - now_ns);
1258     }
1259 }
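
/*
 * Usage sketch (illustrative; do_periodic_work() is hypothetical): a
 * drift-free periodic loop built on thread_sleep_until_ns(), stepping an
 * absolute deadline instead of sleeping a relative amount each iteration.
 *
 *   lk_time_ns_t deadline = current_time_ns();
 *   for (;;) {
 *       deadline += MS2NS(10);
 *       thread_sleep_until_ns(deadline);
 *       do_periodic_work();
 *   }
 */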
1260 
1261 /**
1262  * @brief  Initialize threading system on primary CPU
1263  *
1264  * This function is called once, from kmain().
1265  * Disable PAC protection in case the arch enables PAC.  Suggest inlining to
1266  * avoid creating a non-PAC function if possible (inlining requires LTO).
1267  */
1268 __ARCH_NO_PAC __ALWAYS_INLINE void thread_init_early(void)
1269 {
1270     int i;
1271 
1272     DEBUG_ASSERT(arch_curr_cpu_num() == 0);
1273 
1274     /* initialize the run queues */
1275     for (i=0; i < NUM_PRIORITIES; i++)
1276         list_initialize(&run_queue[i]);
1277 
1278     /* initialize the thread list */
1279     list_initialize(&thread_list);
1280 
1281     list_initialize(&dead_threads);
1282     wait_queue_init(&reaper_wait_queue);
1283 
1284     /* create a thread to cover the current running state */
1285     thread_t *t = idle_thread(0);
1286     init_thread_struct(t, "bootstrap");
1287 
1288     arch_init_thread_initialize(t, 0);
1289 
1290     /* half construct this thread, since we're already running */
1291     t->priority = HIGHEST_PRIORITY;
1292     t->state = THREAD_RUNNING;
1293     t->flags = THREAD_FLAG_DETACHED;
1294     thread_set_curr_cpu(t, 0);
1295     thread_set_pinned_cpu(t, 0);
1296     wait_queue_init(&t->retcode_wait_queue);
1297     list_add_head(&thread_list, &t->thread_list_node);
1298     cpu_priority[0] = t->priority;
1299     platform_cpu_priority_set(0, t->priority);
1300     set_current_thread(t);
1301     thread_init_cookie(t);
1302 }
1303 
1304 static void thread_reaper_init(void)
1305 {
1306     thread_t *t = thread_create("reaper", reaper_thread_routine, NULL,
1307                                 HIGH_PRIORITY, DEFAULT_STACK_SIZE);
1308     if (!t) {
1309         dprintf(CRITICAL, "Failed to start reaper thread\n");
1310         return;
1311     }
1312     thread_detach_and_resume(t);
1313 }
1314 
1315 /**
1316  * @brief Complete thread initialization
1317  *
1318  * This function is called once at boot time
1319  */
1320 void thread_init(void)
1321 {
1322     for (uint i = 0; i < SMP_MAX_CPUS; i++) {
1323         timer_initialize(&preempt_timer[i]);
1324     }
1325     thread_reaper_init();
1326 }
1327 
1328 /**
1329  * @brief Change name of current thread
1330  */
1331 void thread_set_name(const char *name)
1332 {
1333     thread_t *current_thread = get_current_thread();
1334     strlcpy(current_thread->name, name, sizeof(current_thread->name));
1335 }
1336 
1337 /**
1338  * @brief Change priority of current thread
1339  *
1340  * See thread_create() for a discussion of priority values.
1341  */
1342 void thread_set_priority(int priority)
1343 {
1344     DEBUG_ASSERT(!thread_lock_held());
1345     thread_t *current_thread = get_current_thread();
1346 
1347     THREAD_LOCK(state);
1348 
1349     if (priority <= IDLE_PRIORITY)
1350         priority = IDLE_PRIORITY + 1;
1351     if (priority > HIGHEST_PRIORITY)
1352         priority = HIGHEST_PRIORITY;
1353     current_thread->priority = priority;
1354 
1355     current_thread->state = THREAD_READY;
1356     insert_in_run_queue_head(current_thread);
1357     thread_resched();
1358 
1359     THREAD_UNLOCK(state);
1360 }
1361 
1362 /**
1363  * thread_set_pinned_cpu() - Pin thread to a given CPU.
1364  * @t:      Thread to pin
1365  * @cpu:    cpu id on which to pin the thread
1366  */
1367 void thread_set_pinned_cpu(thread_t* t, int cpu) {
1368 #if WITH_SMP
1369     DEBUG_ASSERT(t);
1370     DEBUG_ASSERT(t->magic == THREAD_MAGIC);
1371     DEBUG_ASSERT(cpu >= -1 && cpu < SMP_MAX_CPUS);
1372     DEBUG_ASSERT(!thread_lock_held());
1373 
1374     THREAD_LOCK(state);
1375     if (t->pinned_cpu == cpu) {
1376         goto done;
1377     }
1378 
1379     t->pinned_cpu = cpu;
1380     if ((t->pinned_cpu > -1) && (t->pinned_cpu == t->curr_cpu)) {
1381         /*
1382          * No need to reschedule the thread on a new cpu.
1383          * This exit path is also used during the initial
1384          * boot phase when processors are being brought up:
1385          * see thread_init_early()
1386          * and thread_secondary_cpu_init_early()
1387          */
1388         goto done;
1389     }
1390 
1391     switch(t->state){
1392         case THREAD_SUSPENDED: {
1393             /*
1394              * Early init phase, thread not scheduled yet,
1395              * the cpu pinning will apply at a later stage
1396              * when thread is scheduled
1397              */
1398             goto done;
1399         }
1400         case THREAD_READY: {
1401             DEBUG_ASSERT(!thread_is_idle(t));
1402             DEBUG_ASSERT(t->curr_cpu == -1);
1403             thread_t *current_thread = get_current_thread();
1404             DEBUG_ASSERT(t != current_thread);
1405             /*
1406              * Thread `t` is ready and shall be rescheduled
1407              * according to a new cpu target (either the
1408              * pinned cpu if pinned cpu > -1, or any available
1409              * cpu if pinned cpu == -1).
1410              */
1411             int curr_cpu = arch_curr_cpu_num();
1412             if (t->pinned_cpu == -1 || t->pinned_cpu == curr_cpu) {
1413                 if (current_thread->priority < t->priority) {
1414                     /*
1415                      * if the thread is to be rescheduled on the current
1416                      * cpu due to being higher priority, thread_preempt
1417                      * shall be invoked.
1418                      */
1419                     thread_preempt_lock_held();
1420                     goto done;
1421                 }
1422                 if (t->pinned_cpu == -1
1423                     && thread_is_realtime(current_thread)) {
1424                     /*
1425                      * if the thread is unpinned, it may be rescheduled on
1426                      * another cpu. There are two cases:
1427                      * if the current thread is a standard thread, its time
1428                      * quantum tick handler, thread_timer_callback(),
1429                      * will select the next best cpu for the top unpinned thread.
1430                      * However if the current thread is a real-time thread,
1431                      * its quantum slicing is disabled (by design),
1432                      * thus the newly unpinned thread shall be rescheduled
1433                      * manually on its best cpu
1434                      */
1435                     thread_cond_mp_reschedule(t, __func__);
1436                 }
1437             } else {
1438                 /*
1439                  * if the thread is pinned on another cpu than current
1440                  * an ipi may be sent to the best cpu. This is achieved
1441                  * by invoking thread_mp_reschedule().
1442                  */
1443                 thread_mp_reschedule(current_thread, t);
1444             }
1445             goto done;
1446         }
1447         case THREAD_RUNNING: {
1448             DEBUG_ASSERT(!thread_is_idle(t));
1449             int thread_curr_cpu = t->curr_cpu;
1450             DEBUG_ASSERT(thread_curr_cpu > -1);
1451             thread_t *current_thread = get_current_thread();
1452             if (t->pinned_cpu == -1){
1453                 /*
1454                  * pinned cpu is reset,
1455                  * current cpu still is a valid option,
1456                  * nothing to do
1457                  */
1458                 goto done;
1459             }
1460             /*
1461              * Thread `t` is running and its pinned cpu is
1462              * different from its current cpu, two cases to handle:
1463              * - Running on current cpu
1464              * - Running on another cpu than current
1465              */
1466             if (t == current_thread) {
1467                 /*
1468                  * Thread `t` is the current thread running
1469                  * on current cpu:
1470                  * (thread_set_pinned_cpu called from within
1471                  * current thread), the thread needs to be
1472                  * rescheduled to the new pinned cpu,
1473                  * this is handled within the thread_preempt
1474                  * call
1475                  */
1476                 DEBUG_ASSERT(thread_curr_cpu == (int)arch_curr_cpu_num());
1477                 thread_preempt_lock_held();
1478                 goto done;
1479             }
1480             /*
1481              * Thread `t` is running on another cpu than
1482              * the current one:
1483              * thread_preempt needs to be invoked on this
1484              * other cpu. We do this by invoking mp_reschedule
1485              * on the thread's current cpu, which in turn
1486              * invokes thread_resched to schedule out our thread
1487              * and finally sends an IPI to the newly pinned cpu
1488              */
1489             DEBUG_ASSERT(thread_curr_cpu != (int)arch_curr_cpu_num());
1490             mp_reschedule(1UL << (uint)thread_curr_cpu, 0);
1491             goto done;
1492         }
1493         case THREAD_BLOCKED:
1494         case THREAD_SLEEPING: {
1495             /*
1496              * the new pinned cpu shall be taken into account
1497              * when the thread state change (to THREAD_READY)
1498              * happen - see thread_pinned_cond_mp_reschedule()
1499              */
1500             DEBUG_ASSERT(!thread_is_idle(t));
1501             DEBUG_ASSERT(t != get_current_thread());
1502             goto done;
1503         }
        case THREAD_DEATH: {
            /*
             * thread_set_pinned_cpu cannot be
             * invoked on such a dead/exited thread
             */
            DEBUG_ASSERT(false);
            goto done;
        }
        /*
         * Compiler option -Wswitch will catch a missing
         * case statement if a new thread state
         * value is added and not handled.
         */
    }
done:
    THREAD_UNLOCK(state);
#if DEBUG_THREAD_CPU_PIN
    dprintf(ALWAYS,
            "%s(%d): thread %s, pinned_cpu %d, curr_cpu %d, state [%s]\n",
            __func__, cpu, t->name, t->pinned_cpu, t->curr_cpu,
            thread_state_to_str(t->state));
#endif
#endif
}
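
/*
 * Illustrative usage sketch (not part of the kernel source): pinning a newly
 * created worker thread to cpu 1 before letting it run.  The entry point
 * worker_entry and the chosen cpu are hypothetical; thread_create(),
 * DEFAULT_PRIORITY and DEFAULT_STACK_SIZE are the existing LK thread APIs.
 *
 *   thread_t *worker = thread_create("worker", worker_entry, NULL,
 *                                    DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *   if (worker) {
 *       thread_set_pinned_cpu(worker, 1);
 *       thread_resume(worker);
 *   }
 */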

/**
 * @brief  Become an idle thread
 *
 * This function marks the current thread as the idle thread -- the one which
 * executes when there is nothing else to do.  This function does not return.
 * This function is called once at boot time.
 */
void thread_become_idle(void)
{
    DEBUG_ASSERT(arch_ints_disabled());

    thread_t *t = get_current_thread();

#if WITH_SMP
    char name[16];
    snprintf(name, sizeof(name), "idle %d", arch_curr_cpu_num());
    thread_set_name(name);
#else
    thread_set_name("idle");
#endif

    /* mark ourself as idle */
    t->priority = IDLE_PRIORITY;
    t->flags |= THREAD_FLAG_IDLE;
    thread_set_pinned_cpu(t, arch_curr_cpu_num());

    mp_set_curr_cpu_active(true);
    mp_set_cpu_idle(arch_curr_cpu_num());

    /* enable interrupts and start the scheduler */
    arch_enable_ints();
    thread_yield();

    idle_thread_routine();
}

/**
 * @brief  Initialize threading system on secondary CPUs
 *
 * This function is called once per CPU, from lk_secondary_cpu_entry().
 * Disable PAC protection in case the arch enables PAC.  Suggest inlining to
 * avoid creating a non-PAC function if possible (inlining requires LTO).
 */
__ARCH_NO_PAC __ALWAYS_INLINE void thread_secondary_cpu_init_early(void)
{
    DEBUG_ASSERT(arch_ints_disabled());

    /* construct an idle thread to cover our cpu */
    uint cpu = arch_curr_cpu_num();
    thread_t *t = idle_thread(cpu);

    char name[16];
    snprintf(name, sizeof(name), "idle %u", cpu);
    init_thread_struct(t, name);
    thread_set_pinned_cpu(t, cpu);

    /* half construct this thread, since we're already running */
    t->priority = HIGHEST_PRIORITY;
    t->state = THREAD_RUNNING;
    t->flags = THREAD_FLAG_DETACHED | THREAD_FLAG_IDLE;
    thread_set_curr_cpu(t, cpu);
    thread_set_pinned_cpu(t, cpu);
    wait_queue_init(&t->retcode_wait_queue);

    arch_init_thread_initialize(t, cpu);

    THREAD_LOCK(state);

    list_add_head(&thread_list, &t->thread_list_node);
    cpu_priority[cpu] = t->priority;
    platform_cpu_priority_set(cpu, t->priority);
    set_current_thread(t);

    THREAD_UNLOCK(state);
}

void thread_secondary_cpu_entry(void)
{
    uint cpu = arch_curr_cpu_num();
    thread_t *t = get_current_thread();
    t->priority = IDLE_PRIORITY;

    mp_set_curr_cpu_active(true);
    mp_set_cpu_idle(cpu);

    /* enable interrupts and start the scheduler on this cpu */
    arch_enable_ints();
    thread_yield();

    idle_thread_routine();
}

static const char *thread_state_to_str(enum thread_state state)
{
    switch (state) {
        case THREAD_SUSPENDED:
            return "susp";
        case THREAD_READY:
            return "rdy";
        case THREAD_RUNNING:
            return "run";
        case THREAD_BLOCKED:
            return "blok";
        case THREAD_SLEEPING:
            return "slep";
        case THREAD_DEATH:
            return "deth";
        default:
            return "unkn";
    }
}

static size_t thread_stack_used(thread_t *t) {
#ifdef THREAD_STACK_HIGHWATER
    uint8_t *stack_base;
    size_t stack_size;
    size_t i;

    stack_base = t->stack;
    stack_size = t->stack_size;

    for (i = 0; i < stack_size; i++) {
        if (stack_base[i] != STACK_DEBUG_BYTE)
            break;
    }
    return stack_size - i;
#else
    return 0;
#endif
}
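
/*
 * Worked example (illustrative numbers): with a 4096-byte stack that was
 * filled with STACK_DEBUG_BYTE when the thread was created, if the first
 * 3072 bytes above the stack base still hold the fill pattern, the scan
 * stops at i == 3072 and the function reports 4096 - 3072 = 1024 bytes
 * used, i.e. the deepest the stack has grown so far.
 */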
/**
 * @brief  Dump debugging info about the specified thread.
 */
void dump_thread(thread_t *t)
{
    dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
#if WITH_SMP
    dprintf(INFO, "\tstate %s, curr_cpu %d, pinned_cpu %d, priority %d, remaining quantum %d\n",
            thread_state_to_str(t->state), t->curr_cpu, t->pinned_cpu, t->priority, t->remaining_quantum);
#else
    dprintf(INFO, "\tstate %s, priority %d, remaining quantum %d\n",
            thread_state_to_str(t->state), t->priority, t->remaining_quantum);
#endif
#ifdef THREAD_STACK_HIGHWATER
    dprintf(INFO, "\tstack %p, stack_size %zd, stack_used %zd\n",
            t->stack, t->stack_size, thread_stack_used(t));
#else
    dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
#endif
#if KERNEL_SCS_ENABLED
    dprintf(INFO, "\tshadow stack %p, shadow stack_size %zd\n",
            t->shadow_stack, t->shadow_stack_size);
#endif
    dprintf(INFO, "\tentry %p, arg %p, flags 0x%x\n", t->entry, t->arg, t->flags);
    dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
#if WITH_KERNEL_VM
    dprintf(INFO, "\taspace %p\n", t->aspace);
#endif
#if (MAX_TLS_ENTRY > 0)
    dprintf(INFO, "\ttls:");
    int i;
    for (i=0; i < MAX_TLS_ENTRY; i++) {
        dprintf(INFO, " 0x%lx", t->tls[i]);
    }
    dprintf(INFO, "\n");
#endif
    arch_dump_thread(t);
}

/**
 * @brief  Dump debugging info about all threads
 */
void dump_all_threads(void)
{
    thread_t *t;

    THREAD_LOCK(state);
    list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
        if (t->magic != THREAD_MAGIC) {
            dprintf(INFO, "bad magic on thread struct %p, aborting.\n", t);
            hexdump(t, sizeof(thread_t));
            break;
        }
        dump_thread(t);
    }
    THREAD_UNLOCK(state);
}
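
/*
 * Illustrative sketch (hypothetical helper, not part of this file): dumping
 * all threads from a debug path, for example when a watchdog fires.
 *
 *   static void debug_report_threads(void)
 *   {
 *       dprintf(INFO, "watchdog fired, dumping threads:\n");
 *       dump_all_threads();
 *   }
 */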

/** @} */


/**
 * @defgroup  wait  Wait Queue
 * @{
 */
void wait_queue_init(wait_queue_t *wait)
{
    *wait = (wait_queue_t)WAIT_QUEUE_INITIAL_VALUE(*wait);
}
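
/*
 * Illustrative sketch (hypothetical names): a wait queue is typically
 * embedded in a larger object and initialized once before use.
 *
 *   struct my_event {
 *       wait_queue_t wq;
 *       bool signaled;
 *   };
 *
 *   static void my_event_init(struct my_event *ev)
 *   {
 *       ev->signaled = false;
 *       wait_queue_init(&ev->wq);
 *   }
 */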

static enum handler_return wait_queue_timeout_handler(timer_t *timer,
                                                      lk_time_ns_t now,
                                                      void *arg)
{
    thread_t *thread = (thread_t *)arg;

    DEBUG_ASSERT(thread->magic == THREAD_MAGIC);

    thread_lock_ints_disabled();

    enum handler_return ret = INT_NO_RESCHEDULE;
    if (thread_unblock_from_wait_queue(thread, ERR_TIMED_OUT) >= NO_ERROR) {
        ret = INT_RESCHEDULE;
    }

    thread_unlock_ints_disabled();

    return ret;
}

/**
 * @brief  Block until a wait queue is notified.
 *
 * This function puts the current thread at the end of a wait
 * queue and then blocks until some other thread wakes the queue
 * up again.
 *
 * @param  wait     The wait queue to enter
 * @param  timeout  The maximum time, in ms, to wait
 *
 * If the timeout is zero, this function returns immediately with
 * ERR_TIMED_OUT.  If the timeout is INFINITE_TIME, this function
 * waits indefinitely.  Otherwise, this function returns with
 * ERR_TIMED_OUT at the end of the timeout period.
 *
 * @return ERR_TIMED_OUT on timeout, else returns the return
 * value specified when the queue was woken by wait_queue_wake_one().
 */
status_t wait_queue_block(wait_queue_t *wait, lk_time_t timeout)
{
    timer_t timer;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(thread_lock_held());

    if (timeout == 0)
        return ERR_TIMED_OUT;

    list_add_tail(&wait->list, &current_thread->queue_node);
    wait->count++;
    current_thread->state = THREAD_BLOCKED;
    current_thread->blocking_wait_queue = wait;
    current_thread->wait_queue_block_ret = NO_ERROR;
    /* if the timeout is not infinite, set a callback to yank us out of the queue */
    if (timeout != INFINITE_TIME) {
        timer_initialize(&timer);
        timer_set_oneshot_ns(&timer, MS2NS(timeout),
                             wait_queue_timeout_handler,
                             (void *)current_thread);
    }

    thread_resched();

    /* we don't really know if the timer fired or not, so it's safer to try to cancel it */
    if (timeout != INFINITE_TIME) {
        /*
         * The timer could be running on another CPU. Drop the thread lock, then
         * cancel and wait for the stack-allocated timer.
         */
        thread_unlock_ints_disabled();
        arch_enable_ints();
        timer_cancel_sync(&timer);
        arch_disable_ints();
        thread_lock_ints_disabled();
    }

    return current_thread->wait_queue_block_ret;
}
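
/*
 * Illustrative sketch (continuing the hypothetical my_event example):
 * blocking on the wait queue with a 100 ms timeout.  wait_queue_block()
 * must be called with the thread lock held, which THREAD_LOCK provides.
 *
 *   static status_t my_event_wait(struct my_event *ev)
 *   {
 *       status_t ret = NO_ERROR;
 *
 *       THREAD_LOCK(state);
 *       if (!ev->signaled)
 *           ret = wait_queue_block(&ev->wq, 100);
 *       THREAD_UNLOCK(state);
 *
 *       return ret;  // NO_ERROR on wake, ERR_TIMED_OUT after 100 ms
 *   }
 */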

/**
 * @brief  Wake up one thread sleeping on a wait queue
 *
 * This function removes one thread (if any) from the head of the wait queue and
 * makes it executable.  The new thread will be placed at the head of the
 * run queue.
 *
 * @param wait  The wait queue to wake
 * @param reschedule  If true, the newly-woken thread will run immediately.
 * @param wait_queue_error  The return value which the new thread will receive
 * from wait_queue_block().
 *
 * @return  The number of threads woken (zero or one)
 */
int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
    thread_t *t;
    int ret = 0;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(thread_lock_held());

    t = list_remove_head_type(&wait->list, thread_t, queue_node);
    if (t) {
        wait->count--;
        DEBUG_ASSERT(t->magic == THREAD_MAGIC);
        DEBUG_ASSERT(t->state == THREAD_BLOCKED);
        thread_check_cookie(t);
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;

        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened thread gets a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        if (reschedule) {
            current_thread->state = THREAD_READY;
            insert_in_run_queue_head(current_thread);
        }
        insert_in_run_queue_head(t);
        thread_mp_reschedule(current_thread, t);
        if (reschedule) {
            thread_resched();
        }
        ret = 1;
    }

    return ret;
}
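
/*
 * Illustrative sketch (hypothetical my_event example continued): waking a
 * single waiter.  Passing reschedule = true lets the woken thread run
 * immediately; NO_ERROR becomes the waiter's wait_queue_block() return value.
 *
 *   static void my_event_signal(struct my_event *ev)
 *   {
 *       THREAD_LOCK(state);
 *       ev->signaled = true;
 *       wait_queue_wake_one(&ev->wq, true, NO_ERROR);
 *       THREAD_UNLOCK(state);
 *   }
 */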


/**
 * @brief  Wake all threads sleeping on a wait queue
 *
 * This function removes all threads (if any) from the wait queue and
 * makes them executable.  The new threads will be placed at the head of the
 * run queue.
 *
 * @param wait  The wait queue to wake
 * @param reschedule  If true, the newly-woken threads will run immediately.
 * @param wait_queue_error  The return value which the new threads will receive
 * from wait_queue_block().
 *
 * @return  The number of threads woken
 */
int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
    thread_t *t;
    int ret = 0;
    mp_cpu_mask_t mp_reschedule_target = 0;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(thread_lock_held());

    if (reschedule && wait->count > 0) {
        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened threads get a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        current_thread->state = THREAD_READY;
        insert_in_run_queue_head(current_thread);
    }

    /* pop all the threads off the wait queue into the run queue */
    while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
        wait->count--;
        DEBUG_ASSERT(t->magic == THREAD_MAGIC);
        DEBUG_ASSERT(t->state == THREAD_BLOCKED);
        thread_check_cookie(t);
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;

        insert_in_run_queue_head(t);
        mp_reschedule_target |= thread_get_mp_reschedule_target(current_thread, t);
        ret++;
    }

    DEBUG_ASSERT(wait->count == 0);

    if (ret > 0) {
        mp_reschedule(mp_reschedule_target, 0);
        if (reschedule) {
            DEBUG_ASSERT(current_thread->state == THREAD_READY);
            thread_resched();
        } else {
            /*
             * Verify that thread_resched is not skipped when
             * thread state changes to THREAD_READY
             */
            DEBUG_ASSERT(current_thread->state != THREAD_READY);
        }
    } else {
        /*
         * Verify that thread_resched is not skipped when
         * thread state changes to THREAD_READY
         */
        DEBUG_ASSERT(current_thread->state != THREAD_READY);
    }

    return ret;
}
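
/*
 * Illustrative sketch (hypothetical my_event example continued): a broadcast
 * wake, e.g. when the event becomes permanently signaled.  Every waiter's
 * wait_queue_block() call returns NO_ERROR here.
 *
 *   static void my_event_broadcast(struct my_event *ev)
 *   {
 *       THREAD_LOCK(state);
 *       ev->signaled = true;
 *       wait_queue_wake_all(&ev->wq, false, NO_ERROR);
 *       THREAD_UNLOCK(state);
 *   }
 */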

/**
 * @brief  Free all resources allocated in wait_queue_init()
 *
 * If any threads were waiting on this queue, they are all woken.
 */
void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
{
    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(thread_lock_held());

    wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
    wait->magic = 0;
}

/**
 * @brief  Wake a specific thread in a wait queue
 *
 * This function extracts a specific thread from a wait queue, wakes it, and
 * puts it at the head of the run queue.
 *
 * @param t  The thread to wake
 * @param wait_queue_error  The return value which the new thread will receive
 *   from wait_queue_block().
 *
 * @return ERR_NOT_BLOCKED if the thread was not blocked on a wait queue,
 *   NO_ERROR otherwise.
 */
status_t thread_unblock_from_wait_queue(thread_t *t, status_t wait_queue_error)
{
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(thread_lock_held());

    thread_check_cookie(t);

    if (t->state != THREAD_BLOCKED)
        return ERR_NOT_BLOCKED;

    DEBUG_ASSERT(t->blocking_wait_queue != NULL);
    DEBUG_ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(list_in_list(&t->queue_node));

    list_delete(&t->queue_node);
    t->blocking_wait_queue->count--;
    t->blocking_wait_queue = NULL;
    t->state = THREAD_READY;
    t->wait_queue_block_ret = wait_queue_error;
    insert_in_run_queue_head(t);
    thread_mp_reschedule(get_current_thread(), t);

    return NO_ERROR;
}
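
/*
 * Illustrative sketch (hypothetical): forcing a specific thread out of
 * whatever wait queue it is blocked on, e.g. when cancelling an operation.
 * The thread sees the given status as its wait_queue_block() return value;
 * `worker` is a hypothetical thread_t pointer.
 *
 *   THREAD_LOCK(state);
 *   status_t err = thread_unblock_from_wait_queue(worker, ERR_OBJECT_DESTROYED);
 *   // err is ERR_NOT_BLOCKED if `worker` was not waiting on anything
 *   THREAD_UNLOCK(state);
 */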

#if defined(WITH_DEBUGGER_INFO)
// This is, by necessity, arch-specific, and arm-m specific right now,
// but lives here due to thread_list being static.
//
// It contains sufficient information for a remote debugger to walk
// the thread list without needing the symbols and debug sections in
// the elf binary for lk or the ability to parse them.
const struct __debugger_info__ {
    u32 version; // flags:16 major:8 minor:8
    void *thread_list_ptr;
    void *current_thread_ptr;
    u8 off_list_node;
    u8 off_state;
    u8 off_saved_sp;
    u8 off_was_preempted;
    u8 off_name;
    u8 off_waitq;
} _debugger_info = {
    .version = 0x0100,
    .thread_list_ptr = &thread_list,
    .current_thread_ptr = &_current_thread,
    .off_list_node = __builtin_offsetof(thread_t, thread_list_node),
    .off_state = __builtin_offsetof(thread_t, state),
    .off_saved_sp = __builtin_offsetof(thread_t, arch.sp),
    .off_was_preempted = __builtin_offsetof(thread_t, arch.was_preempted),
    .off_name = __builtin_offsetof(thread_t, name),
    .off_waitq = __builtin_offsetof(thread_t, blocking_wait_queue),
};
#endif