/*
 * Copyright (c) 2008-2015 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef __KERNEL_THREAD_H
#define __KERNEL_THREAD_H

#include <assert.h>
#include <sys/types.h>
#include <list.h>
#include <compiler.h>
#include <arch/defines.h>
#include <arch/ops.h>
#include <arch/thread.h>
#include <kernel/wait.h>
#include <kernel/spinlock.h>
#include <stdatomic.h>
#include <debug.h>

#if WITH_KERNEL_VM
/* forward declaration */
typedef struct vmm_aspace vmm_aspace_t;
#endif

__BEGIN_CDECLS;

/* enable runtime checks at higher debug levels */
#if LK_DEBUGLEVEL > 1
#define THREAD_STATS 1
#define THREAD_STACK_HIGHWATER 1
#endif

enum thread_state {
    THREAD_SUSPENDED = 0,
    THREAD_READY,
    THREAD_RUNNING,
    THREAD_BLOCKED,
    THREAD_SLEEPING,
    THREAD_DEATH,
};

typedef int (*thread_start_routine)(void *arg);

/* thread local storage */
enum thread_tls_list {
#ifdef WITH_LIB_TRUSTY
    TLS_ENTRY_TRUSTY,
#endif
#ifdef WITH_LIB_LKUSER
    TLS_ENTRY_LKUSER,
#endif
#if defined(UBSAN_ENABLED) || defined(CFI_ENABLED)
    TLS_ENTRY_UBSAN,
#endif
#if LK_LIBC_IMPLEMENTATION_IS_MUSL
    TLS_ENTRY_LIBC,
#endif
    MAX_TLS_ENTRY
};

#define THREAD_FLAG_DETACHED                  (1U<<0)
#define THREAD_FLAG_FREE_STACK                (1U<<1)
#define THREAD_FLAG_FREE_STRUCT               (1U<<2)
#define THREAD_FLAG_REAL_TIME                 (1U<<3)
#define THREAD_FLAG_IDLE                      (1U<<4)
#define THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK  (1U<<5)
#define THREAD_FLAG_EXIT_ON_PANIC             (1U<<6)
#define THREAD_FLAG_FREE_SHADOW_STACK         (1U<<7)
#define THREAD_FLAG_FREE_LIBC_STATE           (1U<<8)

#define THREAD_MAGIC (0x74687264) // 'thrd'

typedef struct thread {
    /* stack stuff, don't move, used by assembly code to validate stack */
    void *stack;
    void *stack_high;
    size_t stack_size;
#if KERNEL_SCS_ENABLED
    void *shadow_stack; /* accessed from assembly code */
    size_t shadow_stack_size;
#endif

    /* helps detect benign corruption */
    int magic;
    /* helps detect malicious corruption */
    uint64_t cookie;
    struct list_node thread_list_node;

    /* active bits */
    struct list_node queue_node;
    int priority;
    enum thread_state state;
    int remaining_quantum;
    unsigned int flags;
#if WITH_SMP
    int curr_cpu;
    int pinned_cpu; /* only run on pinned_cpu if >= 0 */
#endif
#if WITH_KERNEL_VM
    vmm_aspace_t *aspace;
#endif

    /* if blocked, a pointer to the wait queue */
    struct wait_queue *blocking_wait_queue;
    status_t wait_queue_block_ret;

    /* architecture stuff */
    struct arch_thread arch;

    /* entry point */
    thread_start_routine entry;
    void *arg;

    /* return code */
    int retcode;
    struct wait_queue retcode_wait_queue;

    /* thread local storage */
    atomic_uintptr_t tls[MAX_TLS_ENTRY];

    char name[32];
} thread_t;

#if WITH_SMP
#define thread_curr_cpu(t) ((t)->curr_cpu)
#define thread_pinned_cpu(t) ((t)->pinned_cpu)
#define thread_set_curr_cpu(t,c) ((t)->curr_cpu = (c))
#else
#define thread_curr_cpu(t) (0)
#define thread_pinned_cpu(t) (-1)
#define thread_set_curr_cpu(t,c) do {} while(0)
#endif

/* thread priority */
#define NUM_PRIORITIES 32
#define LOWEST_PRIORITY 0
#define HIGHEST_PRIORITY (NUM_PRIORITIES - 1)
#define DPC_PRIORITY (NUM_PRIORITIES - 2)
#define IDLE_PRIORITY LOWEST_PRIORITY
#define LOW_PRIORITY (NUM_PRIORITIES / 4)
#define DEFAULT_PRIORITY (NUM_PRIORITIES / 2)
#define HIGH_PRIORITY ((NUM_PRIORITIES / 4) * 3)

/* stack size */
#ifdef CUSTOM_DEFAULT_STACK_SIZE
#define DEFAULT_STACK_SIZE CUSTOM_DEFAULT_STACK_SIZE
#else
#define DEFAULT_STACK_SIZE ARCH_DEFAULT_STACK_SIZE
#endif

/* shadow stack size */
#ifdef CUSTOM_DEFAULT_SHADOW_STACK_SIZE
#define DEFAULT_SHADOW_STACK_SIZE CUSTOM_DEFAULT_SHADOW_STACK_SIZE
#elif defined(ARCH_DEFAULT_SHADOW_STACK_SIZE)
#define DEFAULT_SHADOW_STACK_SIZE ARCH_DEFAULT_SHADOW_STACK_SIZE
#else
#define DEFAULT_SHADOW_STACK_SIZE PAGE_SIZE
#endif

/* functions */
void thread_init_early(void);
void thread_init(void);
void thread_become_idle(void) __NO_RETURN;
void thread_secondary_cpu_init_early(void);
void thread_secondary_cpu_entry(void) __NO_RETURN;
void thread_set_name(const char *name);

/**
 * thread_set_priority() - set priority of current thread
 * @priority:      Priority for the current thread,
 *                 between %LOWEST_PRIORITY
 *                 and %HIGHEST_PRIORITY
 *
 * Context:        This function shall be invoked without
 *                 holding the thread lock.
 */
void thread_set_priority(int priority);

/**
 * thread_set_pinned_cpu() - Pin thread to a given CPU.
 * @t:             Thread to pin
 * @cpu:           CPU id on which to pin the thread; a negative value
 *                 lets the thread run on any CPU (see pinned_cpu in
 *                 struct thread, which only takes effect if >= 0)
 *
 * Context:        This function shall be invoked without
 *                 holding the thread lock.
 */
void thread_set_pinned_cpu(thread_t* t, int cpu);
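
/*
 * Usage sketch (the worker thread t is hypothetical): pin a thread to
 * CPU 1, then later allow it to migrate again.
 *
 *   thread_set_pinned_cpu(t, 1);
 *   ...
 *   thread_set_pinned_cpu(t, -1);
 */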

thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size);
thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine entry, void *arg, int priority, void *stack, size_t stack_size, size_t shadow_stack_size);
status_t thread_resume(thread_t *);
void thread_exit(int retcode) __NO_RETURN;
void thread_exit_from_panic(void) __NO_RETURN;
void thread_sleep_ns(lk_time_ns_t delay_ns);
void thread_sleep_until_ns(lk_time_ns_t target_time_ns);
static inline void thread_sleep(lk_time_t delay_ms) {
    thread_sleep_ns(delay_ms * 1000ULL * 1000);
}
status_t thread_detach(thread_t *t);
status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout);
status_t thread_detach_and_resume(thread_t *t);
status_t thread_set_real_time(thread_t *t);
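
/*
 * Typical lifecycle, sketched (the "worker" entry routine is
 * hypothetical). Threads are created suspended and must be resumed:
 *
 *   static int worker(void *arg) { ... return 0; }
 *
 *   thread_t *t = thread_create("worker", worker, NULL,
 *                               DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *   thread_resume(t);
 *   ...
 *   int ret;
 *   thread_join(t, &ret, INFINITE_TIME);
 *
 * For fire-and-forget threads, thread_detach_and_resume() starts the
 * thread without requiring a later join.
 */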

void dump_thread(thread_t *t);
void arch_dump_thread(thread_t *t);
void dump_all_threads(void);

/* scheduler routines */
void thread_yield(void); /* give up the cpu voluntarily */
void thread_preempt(void); /* get preempted (inserted into head of run queue) */
void thread_block(void); /* block on something and reschedule */
void thread_unblock(thread_t *t, bool resched); /* go back in the run queue */

/* the current thread */
thread_t *get_current_thread(void);
void set_current_thread(thread_t *);

/* scheduler lock */
extern spin_lock_t thread_lock;
extern atomic_uint thread_lock_owner;

static inline uint thread_lock_owner_get(void) {
    return atomic_load_explicit(&thread_lock_owner, memory_order_relaxed);
}

static inline void thread_lock_complete(void) {
    DEBUG_ASSERT(thread_lock_owner_get() == SMP_MAX_CPUS);
    atomic_store_explicit(&thread_lock_owner, arch_curr_cpu_num(),
                          memory_order_relaxed);
}

static inline void thread_unlock_prepare(void) {
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(thread_lock_owner_get() == arch_curr_cpu_num());
    atomic_store_explicit(&thread_lock_owner, (uint)SMP_MAX_CPUS,
                          memory_order_relaxed);
}

#define THREAD_LOCK(state) \
    spin_lock_saved_state_t state; \
    spin_lock_irqsave(&thread_lock, state); \
    thread_lock_complete()

#define THREAD_UNLOCK(state) \
    thread_unlock_prepare(); \
    spin_unlock_irqrestore(&thread_lock, state)
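
/*
 * Usage: THREAD_LOCK declares the spin_lock_saved_state_t variable
 * itself, so a lock/unlock pair must share one scope and use the same
 * name:
 *
 *   THREAD_LOCK(state);
 *   ... access scheduler-protected data ...
 *   THREAD_UNLOCK(state);
 */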

static inline void thread_lock_ints_disabled(void) {
    DEBUG_ASSERT(arch_ints_disabled());
    spin_lock(&thread_lock);
    thread_lock_complete();
}

static inline void thread_unlock_ints_disabled(void) {
    thread_unlock_prepare();
    spin_unlock(&thread_lock);
}

static inline bool thread_lock_held(void)
{
    bool ret;
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, SPIN_LOCK_FLAG_INTERRUPTS);
    ret = thread_lock_owner_get() == arch_curr_cpu_num();
    arch_interrupt_restore(state, SPIN_LOCK_FLAG_INTERRUPTS);
    return ret;
}
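
/*
 * thread_lock_held() is mainly useful for asserting lock invariants in
 * functions that require the caller to hold the scheduler lock, e.g.:
 *
 *   DEBUG_ASSERT(thread_lock_held());
 */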

/* thread local storage */
static inline __ALWAYS_INLINE uintptr_t thread_tls_get(thread_t *t, uint entry)
{
    return atomic_load(&t->tls[entry]);
}

static inline __ALWAYS_INLINE uintptr_t tls_get(uint entry)
{
    return thread_tls_get(get_current_thread(), entry);
}

static inline __ALWAYS_INLINE uintptr_t __thread_tls_set(thread_t *t,
                                                         uint entry,
                                                         uintptr_t val)
{
    return atomic_exchange(&t->tls[entry], val);
}

#define thread_tls_set(t,e,v) \
    ({ \
        STATIC_ASSERT((e) < MAX_TLS_ENTRY); \
        __thread_tls_set(t, e, v); \
    })

#define tls_set(e,v) thread_tls_set(get_current_thread(), e, v)
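
/*
 * Usage sketch (assumes WITH_LIB_TRUSTY so TLS_ENTRY_TRUSTY exists;
 * struct my_state is hypothetical). Slots hold a single uintptr_t per
 * thread, and thread_tls_set() statically rejects out-of-range slots:
 *
 *   tls_set(TLS_ENTRY_TRUSTY, (uintptr_t)my_state);
 *   struct my_state *s = (struct my_state *)tls_get(TLS_ENTRY_TRUSTY);
 */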

static inline void thread_set_flag(thread_t *t, uint flag, bool enable)
{
    THREAD_LOCK(state);
    if (enable) {
        t->flags |= flag;
    } else {
        t->flags &= ~flag;
    }
    THREAD_UNLOCK(state);
}

static inline bool thread_get_flag(thread_t *t, uint flag)
{
    bool enabled;
    THREAD_LOCK(state);
    enabled = t->flags & flag;
    THREAD_UNLOCK(state);
    return enabled;
}

/**
 * thread_set_flag_exit_on_panic - Set flag to ignore panic in a specific thread
 * @t:       Thread to set flag on
 * @enable:  If %true, exit the thread instead of halting the system when
 *           panic is called from @t. If %false, halt the system when panic
 *           is called from @t (default behavior).
 *
 * Should only be used for kernel test threads, as it is generally not safe
 * to continue kernel execution after panic has been called.
 */
static inline void thread_set_flag_exit_on_panic(thread_t *t, bool enable)
{
    thread_set_flag(t, THREAD_FLAG_EXIT_ON_PANIC, enable);
}

static inline bool thread_get_flag_exit_on_panic(thread_t *t)
{
    return thread_get_flag(t, THREAD_FLAG_EXIT_ON_PANIC);
}
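
/*
 * Test-only sketch (the "panic_test" entry routine is hypothetical):
 * set the flag before resuming so a panic inside the thread exits it
 * rather than halting the system:
 *
 *   thread_t *t = thread_create("panic-test", panic_test, NULL,
 *                               DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *   thread_set_flag_exit_on_panic(t, true);
 *   thread_resume(t);
 */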

/* thread level statistics */
#if THREAD_STATS
struct thread_stats {
    lk_time_ns_t idle_time;
    lk_time_ns_t last_idle_timestamp;
    ulong reschedules;
    ulong context_switches;
    ulong preempts;
    ulong yields;
    ulong interrupts; /* platform code increments this */
    ulong timer_ints; /* timer code increments this */
    ulong timers; /* timer code increments this */

#if WITH_SMP
    ulong reschedule_ipis;
#endif
};

extern struct thread_stats thread_stats[SMP_MAX_CPUS];

#define THREAD_STATS_INC(name) do { thread_stats[arch_curr_cpu_num()].name++; } while(0)

#else

#define THREAD_STATS_INC(name) do { } while (0)

#endif
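
/*
 * Usage: platform and timer code bump the matching per-cpu counter; the
 * macro compiles to nothing when THREAD_STATS is disabled, e.g. in an
 * interrupt handler:
 *
 *   THREAD_STATS_INC(interrupts);
 */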

__END_CDECLS;

#endif