/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <bootstate.h>
#include <console/console.h>
#include <smp/node.h>
#include <thread.h>
#include <timer.h>
#include <types.h>

static u8 thread_stacks[CONFIG_STACK_SIZE * CONFIG_NUM_THREADS] __aligned(sizeof(uint64_t));
static bool initialized;

static void idle_thread_init(void);

/* There needs to be at least one thread to run the ramstage boot state machine. */
#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)

/* Storage space for the thread structs. */
static struct thread all_threads[TOTAL_NUM_THREADS];

/* All runnable (but not running) and free threads are kept on their
 * respective lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;

static struct thread *active_thread;

static inline int thread_can_yield(const struct thread *t)
{
	return (t != NULL && t->can_yield > 0);
}

static inline void set_current_thread(struct thread *t)
{
	assert(boot_cpu());
	active_thread = t;
}

static inline struct thread *current_thread(void)
{
	if (!initialized || !boot_cpu())
		return NULL;

	return active_thread;
}

static inline int thread_list_empty(struct thread **list)
{
	return *list == NULL;
}

static inline struct thread *pop_thread(struct thread **list)
{
	struct thread *t;

	t = *list;
	*list = t->next;
	t->next = NULL;
	return t;
}

static inline void push_thread(struct thread **list, struct thread *t)
{
	t->next = *list;
	*list = t;
}

static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}

static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}

static inline struct thread *get_free_thread(void)
{
	struct thread *t;

	if (thread_list_empty(&free_threads))
		return NULL;

	t = pop_thread(&free_threads);

	/* Reset the current stack value to the original. */
	if (!t->stack_orig)
		die("%s: Invalid stack value\n", __func__);

	t->stack_current = t->stack_orig;

	return t;
}

static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}

/* The idle thread is run whenever there isn't anything else that is runnable.
 * Its sole responsibility is to ensure progress is made by running the timer
 * callbacks. */
__noreturn static enum cb_err idle_thread(void *unused)
{
	/* This thread never voluntarily yields. */
	thread_coop_disable();
	while (1)
		timers_run();
}

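/* Switch execution to thread t. If t is NULL, the current thread is blocking
 * or exiting, so pick the next runnable thread without requeueing the caller. */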
static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL need to find new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}

	if (t->handle)
		t->handle->state = THREAD_STARTED;

	set_current_thread(t);

	switch_to_thread(t->stack_current, &current->stack_current);
}

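/* Record the exit status in the thread's handle, return the thread to the
 * free list and switch to the next runnable thread. */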
static void terminate_thread(struct thread *t, enum cb_err error)
{
	if (t->handle) {
		t->handle->error = error;
		t->handle->state = THREAD_DONE;
	}

	free_thread(t);
	schedule(NULL);
}

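/* Entry point of a newly started thread: run the stashed entry function and
 * terminate the thread with its return value. */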
static asmlinkage void call_wrapper(void *unused)
{
	struct thread *current = current_thread();
	enum cb_err error;

	error = current->entry(current->entry_arg);

	terminate_thread(current, error);
}

struct block_boot_state {
	boot_state_t state;
	boot_state_sequence_t seq;
};

/* Block the provided state until the thread is complete. */
static asmlinkage void call_wrapper_block_state(void *arg)
{
	struct block_boot_state *bbs = arg;
	struct thread *current = current_thread();
	enum cb_err error;

	boot_state_block(bbs->state, bbs->seq);
	error = current->entry(current->entry_arg);
	boot_state_unblock(bbs->state, bbs->seq);
	terminate_thread(current, error);
}

/* Prepare a thread so that it starts by executing thread_entry(thread_arg).
 * Within thread_entry() it will call func(arg). */
static void prepare_thread(struct thread *t, struct thread_handle *handle,
			   enum cb_err (*func)(void *), void *arg,
			   asmlinkage void (*thread_entry)(void *), void *thread_arg)
{
	/* Stash the function and argument to run. */
	t->entry = func;
	t->entry_arg = arg;

	/* All new threads can yield by default. */
	t->can_yield = 1;

	/* Pointer used to publish the state of the thread. */
	t->handle = handle;

	arch_prepare_thread(t, thread_entry, thread_arg);
}

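/* Timer callback that switches back to the thread which armed the timeout. */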
static void thread_resume_from_timeout(struct timeout_callback *tocb)
{
	struct thread *to;

	to = tocb->priv;
	schedule(to);
}

static void idle_thread_init(void)
{
	struct thread *t;

	t = get_free_thread();

	if (t == NULL)
		die("No threads available for idle thread!\n");

	/* Queue idle thread to run once all other threads have yielded. */
	prepare_thread(t, NULL, idle_thread, NULL, call_wrapper, NULL);
	push_runnable(t);
}

/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb,
	unsigned int microsecs)
{
	tocb->priv = current_thread();
	tocb->callback = thread_resume_from_timeout;

	if (timer_sched_callback(tocb, microsecs))
		return -1;

	/* The timer callback will wake up the current thread. */
	schedule(NULL);
	return 0;
}

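/* Reserve per-thread storage at the top of a not-yet-started thread's stack,
 * e.g. for arguments handed to the thread entry wrapper. */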
static void *thread_alloc_space(struct thread *t, size_t bytes)
{
	/* Allocate the amount of space on the stack keeping the stack
	 * aligned to the pointer size. */
	t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));

	return (void *)t->stack_current;
}

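/* One-time setup: thread 0 is the context already executing on the boot CPU,
 * the remaining threads get stacks carved out of thread_stacks and go on the
 * free list, and the idle thread is queued as the scheduler's fallback. */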
static void threads_initialize(void)
{
	int i;
	struct thread *t;
	u8 *stack_top;

	if (initialized)
		return;

	t = &all_threads[0];

	set_current_thread(t);

	t->stack_orig = (uintptr_t)NULL; /* We never free the main thread */
	t->id = 0;
	t->can_yield = 1;

	stack_top = &thread_stacks[CONFIG_STACK_SIZE];
	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
		t = &all_threads[i];
		t->stack_orig = (uintptr_t)stack_top;
		t->id = i;
		stack_top += CONFIG_STACK_SIZE;
		free_thread(t);
	}

	idle_thread_init();

	initialized = 1;
}

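/* Start func(arg) on a fresh cooperative thread and switch to it immediately.
 * Returns 0 on success, -1 if the caller cannot yield or no thread is free. */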
int thread_run(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg)
{
	struct thread *current;
	struct thread *t;

	/* Lazy initialization */
	threads_initialize();

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__);
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "%s: No more threads!\n", __func__);
		return -1;
	}

	prepare_thread(t, handle, func, arg, call_wrapper, NULL);
	schedule(t);

	return 0;
}

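/* Like thread_run(), but also block the given boot state/sequence until the
 * thread has completed. */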
int thread_run_until(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg,
		     boot_state_t state, boot_state_sequence_t seq)
{
	struct thread *current;
	struct thread *t;
	struct block_boot_state *bbs;

	/* This is a ramstage specific API */
	if (!ENV_RAMSTAGE)
		dead_code();

	/* Lazy initialization */
	threads_initialize();

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__);
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "%s: No more threads!\n", __func__);
		return -1;
	}

	bbs = thread_alloc_space(t, sizeof(*bbs));
	bbs->state = state;
	bbs->seq = seq;
	prepare_thread(t, handle, func, arg, call_wrapper_block_state, bbs);
	schedule(t);

	return 0;
}

int thread_yield(void)
{
	return thread_yield_microseconds(0);
}

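/* Yield the CPU for at least microsecs. Returns 0 on success, -1 if the
 * current context cannot yield or no timer callback could be scheduled. */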
int thread_yield_microseconds(unsigned int microsecs)
{
	struct thread *current;
	struct timeout_callback tocb;

	current = current_thread();

	if (!thread_can_yield(current))
		return -1;

	if (thread_yield_timed_callback(&tocb, microsecs))
		return -1;

	return 0;
}

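/* Allow the current thread to yield again; balances a prior
 * thread_coop_disable() call. */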
void thread_coop_enable(void)
{
	struct thread *current;

	current = current_thread();

	if (current == NULL)
		return;

	assert(current->can_yield <= 0);

	current->can_yield++;
}

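/* Prevent the current thread from yielding until a matching
 * thread_coop_enable() call. */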
void thread_coop_disable(void)
{
	struct thread *current;

	current = current_thread();

	if (current == NULL)
		return;

	current->can_yield--;
}

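/* Yield until the thread behind handle has finished, then return its exit
 * status. */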
enum cb_err thread_join(struct thread_handle *handle)
{
	struct stopwatch sw;
	struct thread *current = current_thread();

	assert(handle);
	assert(current);
	assert(current->handle != handle);

	if (handle->state == THREAD_UNINITIALIZED)
		return CB_ERR_ARG;

	printk(BIOS_SPEW, "waiting for thread\n");

	stopwatch_init(&sw);

	while (handle->state != THREAD_DONE)
		assert(thread_yield() == 0);

	printk(BIOS_SPEW, "took %lld us\n", stopwatch_duration_usecs(&sw));

	return handle->error;
}

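/* Acquire the mutex, yielding to other threads while it is held elsewhere. */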
void thread_mutex_lock(struct thread_mutex *mutex)
{
	struct stopwatch sw;

	stopwatch_init(&sw);

	while (mutex->locked)
		assert(thread_yield() == 0);
	mutex->locked = true;

	printk(BIOS_SPEW, "took %lld us to acquire mutex\n", stopwatch_duration_usecs(&sw));
}

void thread_mutex_unlock(struct thread_mutex *mutex)
{
	assert(mutex->locked);
	mutex->locked = 0;
}