/*
 * Copyright (c) 2013-2016 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <err.h>
#include <inttypes.h> /* for PRIx64, used in sm_queue_stdcall below */
#include <kernel/event.h>
#include <kernel/mutex.h>
#include <kernel/thread.h>
#include <kernel/vm.h>
#include <lib/heap.h>
#include <lib/sm.h>
#include <lib/sm/sm_err.h>
#include <lib/sm/smcall.h>
#include <lk/init.h>
#include <platform.h>
#include <stdatomic.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>
#include <version.h>

#define LOCAL_TRACE 0

struct sm_std_call_state {
    spin_lock_t lock;
    event_t event;
    struct smc32_args args;
    long ret;
    bool done;
    int active_cpu;  /* cpu that expects stdcall result */
    int initial_cpu; /* Debug info: cpu that started stdcall */
    int last_cpu;    /* Debug info: most recent cpu expecting stdcall result */
    int restart_count;
};

extern unsigned long monitor_vector_table;
extern ulong lk_boot_args[4];

static void* boot_args;
static int boot_args_refcnt;
static mutex_t boot_args_lock = MUTEX_INITIAL_VALUE(boot_args_lock);
static atomic_uint_fast32_t sm_api_version;
static atomic_uint_fast32_t sm_api_version_min;
static atomic_uint_fast32_t sm_api_version_max = TRUSTY_API_VERSION_CURRENT;
static spin_lock_t sm_api_version_lock;
static atomic_bool platform_halted;

static event_t nsirqevent[SMP_MAX_CPUS];
static thread_t* nsirqthreads[SMP_MAX_CPUS];
static thread_t* nsidlethreads[SMP_MAX_CPUS];
static thread_t* stdcallthread;
static bool irq_thread_ready[SMP_MAX_CPUS];
struct sm_std_call_state stdcallstate = {
        .event = EVENT_INITIAL_VALUE(stdcallstate.event, 0, 0),
        .active_cpu = -1,
        .initial_cpu = -1,
        .last_cpu = -1,
};

extern smc32_handler_t sm_stdcall_table[];
extern smc32_handler_t sm_nopcall_table[];
extern smc32_handler_t sm_fastcall_table[];

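/*
 * smc_sm_api_version: negotiate the Trusty API version with the non-secure
 * OS. The requested version is clamped to sm_api_version_max; requests below
 * sm_api_version_min are rejected and the currently selected version is
 * returned instead. On success the selected version also becomes the new
 * minimum, so the non-secure side cannot downgrade it later.
 */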
long smc_sm_api_version(struct smc32_args* args) {
    uint32_t api_version = args->params[0];

    spin_lock(&sm_api_version_lock);
    LTRACEF("request api version %d\n", api_version);
    if (api_version > sm_api_version_max) {
        api_version = sm_api_version_max;
    }

    if (api_version < sm_api_version_min) {
        TRACEF("ERROR: Tried to select incompatible api version %d < %d, current version %d\n",
               api_version, sm_api_version_min, sm_api_version);
        api_version = sm_api_version;
    } else {
        /* Update and lock the version to prevent downgrade */
        sm_api_version = api_version;
        sm_api_version_min = api_version;
    }
    spin_unlock(&sm_api_version_lock);

    LTRACEF("return api version %d\n", api_version);
    return api_version;
}

long smc_get_smp_max_cpus(struct smc32_args* args) {
    return SMP_MAX_CPUS;
}

uint32_t sm_get_api_version(void) {
    return sm_api_version;
}

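/*
 * sm_check_and_lock_api_version: check whether the negotiated API version is
 * at least version_wanted, and pin the answer. If the version is still
 * undecided, this narrows [sm_api_version_min, sm_api_version_max] so that a
 * later smc_sm_api_version call cannot change the result returned here.
 */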
bool sm_check_and_lock_api_version(uint32_t version_wanted) {
    spin_lock_saved_state_t state;

    DEBUG_ASSERT(version_wanted > 0);

    if (sm_api_version_min >= version_wanted) {
        return true;
    }
    if (sm_api_version_max < version_wanted) {
        return false;
    }

    spin_lock_save(&sm_api_version_lock, &state, SPIN_LOCK_FLAG_IRQ_FIQ);
    if (sm_api_version < version_wanted) {
        sm_api_version_max = MIN(sm_api_version_max, version_wanted - 1);
        TRACEF("max api version set: %d\n", sm_api_version_max);
    } else {
        sm_api_version_min = MAX(sm_api_version_min, version_wanted);
        TRACEF("min api version set: %d\n", sm_api_version_min);
    }
    DEBUG_ASSERT(sm_api_version_min <= sm_api_version_max);
    DEBUG_ASSERT(sm_api_version >= sm_api_version_min);
    DEBUG_ASSERT(sm_api_version <= sm_api_version_max);

    spin_unlock_restore(&sm_api_version_lock, state, SPIN_LOCK_FLAG_IRQ_FIQ);

    return sm_api_version_min >= version_wanted;
}

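/*
 * sm_stdcall_loop: body of the sm-stdcall thread. Waits for sm_queue_stdcall
 * to signal stdcallstate.event, dispatches the queued 'standard call' to the
 * handler registered for its SMC entity, then publishes the result in
 * stdcallstate.ret under stdcallstate.lock so the cpu waiting on the call
 * can return it to the non-secure world.
 */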
static int __NO_RETURN sm_stdcall_loop(void* arg) {
    long ret;
    spin_lock_saved_state_t state;

    while (true) {
        LTRACEF("cpu %d, wait for stdcall\n", arch_curr_cpu_num());
        event_wait(&stdcallstate.event);

        /* Dispatch 'standard call' handler */
        LTRACEF("cpu %d, got stdcall: 0x%x, 0x%x, 0x%x, 0x%x\n",
                arch_curr_cpu_num(), stdcallstate.args.smc_nr,
                stdcallstate.args.params[0], stdcallstate.args.params[1],
                stdcallstate.args.params[2]);
        ret = sm_stdcall_table[SMC_ENTITY(stdcallstate.args.smc_nr)](
                &stdcallstate.args);
        LTRACEF("cpu %d, stdcall(0x%x, 0x%x, 0x%x, 0x%x) returned 0x%lx (%ld)\n",
                arch_curr_cpu_num(), stdcallstate.args.smc_nr,
                stdcallstate.args.params[0], stdcallstate.args.params[1],
                stdcallstate.args.params[2], ret, ret);
        spin_lock_save(&stdcallstate.lock, &state, SPIN_LOCK_FLAG_IRQ);
        stdcallstate.ret = ret;
        stdcallstate.done = true;
        event_unsignal(&stdcallstate.event);
        spin_unlock_restore(&stdcallstate.lock, state, SPIN_LOCK_FLAG_IRQ);
    }
}

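/*
 * sm_queue_stdcall: queue a 'standard call' for the sm-stdcall thread, or
 * resume a previously interrupted one. Only one standard call may be in
 * flight at a time: a new call while one is pending fails with SM_ERR_BUSY,
 * and SMC_SC_RESTART_LAST is only accepted from the client that issued the
 * interrupted call, while no cpu is currently waiting on it.
 */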
/* must be called with irqs disabled */
static long sm_queue_stdcall(struct smc32_args* args) {
    long ret;
    uint cpu = arch_curr_cpu_num();

    spin_lock(&stdcallstate.lock);

    if (stdcallstate.event.signaled || stdcallstate.done) {
        if (args->smc_nr == SMC_SC_RESTART_LAST &&
            stdcallstate.args.client_id != args->client_id) {
            dprintf(CRITICAL,
                    "%s: cpu %d, unexpected restart, "
                    "client %" PRIx64 " != %" PRIx64 "\n",
                    __func__, cpu, stdcallstate.args.client_id,
                    args->client_id);
            ret = SM_ERR_UNEXPECTED_RESTART;
            goto err;
        } else if (args->smc_nr == SMC_SC_RESTART_LAST &&
                   stdcallstate.active_cpu == -1) {
            stdcallstate.restart_count++;
            LTRACEF_LEVEL(3, "cpu %d, restart std call, restart_count %d\n",
                          cpu, stdcallstate.restart_count);
            goto restart_stdcall;
        }
        dprintf(CRITICAL, "%s: cpu %d, std call busy\n", __func__, cpu);
        ret = SM_ERR_BUSY;
        goto err;
    } else {
        if (args->smc_nr == SMC_SC_RESTART_LAST) {
            dprintf(CRITICAL,
                    "%s: cpu %d, unexpected restart, no std call active\n",
                    __func__, arch_curr_cpu_num());
            ret = SM_ERR_UNEXPECTED_RESTART;
            goto err;
        }
    }

    LTRACEF("cpu %d, queue std call 0x%x\n", cpu, args->smc_nr);
    stdcallstate.initial_cpu = cpu;
    stdcallstate.ret = SM_ERR_INTERNAL_FAILURE;
    stdcallstate.args = *args;
    stdcallstate.restart_count = 0;
    event_signal(&stdcallstate.event, false);

restart_stdcall:
    stdcallstate.active_cpu = cpu;
    ret = 0;

err:
    spin_unlock(&stdcallstate.lock);

    return ret;
}

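/*
 * sm_sched_nonsecure_fiq_loop: return ret to the non-secure world and keep
 * handling incoming 'fast calls' in place until a non-fast SMC arrives.
 * SMC64 calls are rejected with SM_ERR_NOT_SUPPORTED, and after
 * platform_halt every call except SMC_FC_FIQ_ENTER is answered with
 * SM_ERR_PANIC without leaving the loop.
 */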
static void sm_sched_nonsecure_fiq_loop(long ret, struct smc32_args* args) {
    while (true) {
        if (atomic_load(&platform_halted)) {
            ret = SM_ERR_PANIC;
        }
        sm_sched_nonsecure(ret, args);
        if (atomic_load(&platform_halted) && args->smc_nr != SMC_FC_FIQ_ENTER) {
            continue;
        }
        if (SMC_IS_SMC64(args->smc_nr)) {
            ret = SM_ERR_NOT_SUPPORTED;
            continue;
        }
        if (!SMC_IS_FASTCALL(args->smc_nr)) {
            break;
        }
        ret = sm_fastcall_table[SMC_ENTITY(args->smc_nr)](args);
    }
}

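/*
 * sm_return_and_wait_for_next_stdcall: hand ret back to the non-secure
 * world, then keep servicing SMC_SC_NOP calls (which may run concurrently
 * on multiple cpus) until a standard call is successfully queued on this
 * cpu.
 */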
/* must be called with irqs disabled */
static enum handler_return sm_return_and_wait_for_next_stdcall(long ret,
                                                               int cpu) {
    struct smc32_args args = SMC32_ARGS_INITIAL_VALUE(args);

    do {
#if ARCH_HAS_FIQ
        arch_disable_fiqs();
#endif
        sm_sched_nonsecure_fiq_loop(ret, &args);
#if ARCH_HAS_FIQ
        arch_enable_fiqs();
#endif

        /* Allow concurrent SMC_SC_NOP calls on multiple cpus */
        if (args.smc_nr == SMC_SC_NOP) {
            LTRACEF_LEVEL(3, "cpu %d, got nop\n", cpu);
            ret = sm_nopcall_table[SMC_ENTITY(args.params[0])](&args);
        } else {
            ret = sm_queue_stdcall(&args);
        }
    } while (ret);

    return sm_intc_enable_interrupts();
}

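/*
 * sm_irq_return_ns: return to the non-secure world so it can handle a
 * pending non-secure interrupt. If this cpu currently owns the active
 * standard call, the call is marked interrupted so the non-secure OS can
 * resume it with SMC_SC_RESTART_LAST; otherwise a nop is interrupted.
 */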
static void sm_irq_return_ns(void) {
    long ret;
    int cpu;

    cpu = arch_curr_cpu_num();

    spin_lock(&stdcallstate.lock); /* TODO: remove? */
    LTRACEF_LEVEL(2, "got irq on cpu %d, stdcallcpu %d\n", cpu,
                  stdcallstate.active_cpu);
    if (stdcallstate.active_cpu == cpu) {
        stdcallstate.last_cpu = stdcallstate.active_cpu;
        stdcallstate.active_cpu = -1;
        ret = SM_ERR_INTERRUPTED;
    } else {
        ret = SM_ERR_NOP_INTERRUPTED;
    }
    LTRACEF_LEVEL(2, "got irq on cpu %d, return %ld\n", cpu, ret);
    spin_unlock(&stdcallstate.lock);
    sm_return_and_wait_for_next_stdcall(ret, cpu);
}

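/*
 * sm_irq_loop: body of the per-cpu irq-ns-switch thread. Each instance is
 * pinned to its cpu and woken by sm_handle_irq when a non-secure interrupt
 * fires while the secure side is running.
 */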
static int __NO_RETURN sm_irq_loop(void* arg) {
    int cpu;
    /* cpu that requested this thread, the current cpu could be different */
    int eventcpu = (uintptr_t)arg;

    /*
     * Run this thread with interrupts masked, so we don't reenter the
     * interrupt handler. The interrupt handler for non-secure interrupts
     * returns to this thread with the interrupt still pending.
     */
    arch_disable_ints();
    irq_thread_ready[eventcpu] = true;

    cpu = arch_curr_cpu_num();
    LTRACEF("wait for irqs for cpu %d, on cpu %d\n", eventcpu, cpu);
    while (true) {
        event_wait(&nsirqevent[eventcpu]);
        sm_irq_return_ns();
    }
}

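/*
 * sm_get_stdcall_ret: collect the result of the active standard call on the
 * cpu that owns it. If the sm-stdcall thread has not finished yet, report
 * why control is returning to the non-secure world instead (idle cpu on the
 * SMP API, busy restart, or interrupted call).
 */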
/* must be called with irqs disabled */
static long sm_get_stdcall_ret(void) {
    long ret;
    uint cpu = arch_curr_cpu_num();

    spin_lock(&stdcallstate.lock);

    if (stdcallstate.active_cpu != (int)cpu) {
        dprintf(CRITICAL, "%s: stdcallcpu, a%d != curr-cpu %d, l%d, i%d\n",
                __func__, stdcallstate.active_cpu, cpu, stdcallstate.last_cpu,
                stdcallstate.initial_cpu);
        ret = SM_ERR_INTERNAL_FAILURE;
        goto err;
    }
    stdcallstate.last_cpu = stdcallstate.active_cpu;
    stdcallstate.active_cpu = -1;

    if (stdcallstate.done) {
        stdcallstate.done = false;
        ret = stdcallstate.ret;
        LTRACEF("cpu %d, return stdcall result, %ld, initial cpu %d\n", cpu,
                stdcallstate.ret, stdcallstate.initial_cpu);
    } else {
        if (sm_check_and_lock_api_version(TRUSTY_API_VERSION_SMP))
            ret = SM_ERR_CPU_IDLE; /* ns using smp api */
        else if (stdcallstate.restart_count)
            ret = SM_ERR_BUSY;
        else
            ret = SM_ERR_INTERRUPTED;
        LTRACEF("cpu %d, initial cpu %d, restart_count %d, std call not finished, return %ld\n",
                cpu, stdcallstate.initial_cpu, stdcallstate.restart_count, ret);
    }
err:
    spin_unlock(&stdcallstate.lock);

    return ret;
}

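/*
 * enter_smcall_critical_section / exit_smcall_critical_section bracket the
 * window in which stdcallstate.active_cpu is compared against the current
 * cpu. Interrupts stay disabled across the pair so the active cpu cannot
 * change between that check and the return to the non-secure world.
 */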
static uint enter_smcall_critical_section(void) {
    /*
     * Disable interrupts so stdcallstate.active_cpu does not
     * change to or from this cpu after checking it in the critical
     * section.
     */
    arch_disable_ints();

    /* Switch to sm-stdcall if sm_queue_stdcall woke it up */
    thread_yield();

    return arch_curr_cpu_num();
}

static void exit_smcall_critical_section(long ret, uint cpu) {
    enum handler_return resched;

    resched = sm_return_and_wait_for_next_stdcall(ret, cpu);
    if (resched == INT_RESCHEDULE)
        thread_preempt();

    /* Re-enable interrupts (needed for SMC_SC_NOP) */
    arch_enable_ints();
}

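/*
 * sm_wait_for_smcall: body of the per-cpu idle-ns-switch thread, which runs
 * just above the idle thread in priority (LOWEST_PRIORITY + 1). Whenever
 * nothing else is runnable on this cpu, it returns either the finished
 * stdcall result or SM_ERR_NOP_DONE to the non-secure world and waits there
 * for the next SMC.
 */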
static int sm_wait_for_smcall(void* arg) {
    int cpu;
    long ret = 0;

    LTRACEF("wait for stdcalls, on cpu %d\n", arch_curr_cpu_num());

    while (true) {
        cpu = enter_smcall_critical_section();

        if (cpu == stdcallstate.active_cpu)
            ret = sm_get_stdcall_ret();
        else
            ret = SM_ERR_NOP_DONE;

        exit_smcall_critical_section(ret, cpu);
    }
}

#if WITH_LIB_SM_MONITOR
/* per-cpu secure monitor initialization */
static void sm_mon_percpu_init(uint level) {
    /* let normal world enable SMP, lock TLB, access CP10/11 */
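    /*
     * NSACR bit decode (ARMv7): 0xC00 sets cp10/cp11 (VFP/NEON access for
     * the normal world), 0x60000 sets TL (lockable TLB entries) and NS_SMP
     * (normal world may set ACTLR.SMP).
     */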
    __asm__ volatile(
            "mrc	p15, 0, r1, c1, c1, 2	\n"
            "orr	r1, r1, #0xC00		\n"
            "orr	r1, r1, #0x60000	\n"
            "mcr	p15, 0, r1, c1, c1, 2	@ NSACR	\n"
            :
            :
            : "r1");

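    /* point MVBAR at the secure monitor's vector table */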
    __asm__ volatile("mcr	p15, 0, %0, c12, c0, 1	\n"
                     :
                     : "r"(&monitor_vector_table));
}
LK_INIT_HOOK_FLAGS(libsm_mon_percpu,
                   sm_mon_percpu_init,
                   LK_INIT_LEVEL_PLATFORM - 3,
                   LK_INIT_FLAG_ALL_CPUS);
#endif
416 
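/*
 * sm_init: map the boot parameter block handed over by the bootloader (if
 * any) and create the per-cpu NS switcher threads plus the single
 * sm-stdcall thread. Only sm-stdcall is resumed here; the per-cpu threads
 * stay suspended until resume_nsthreads runs after the boot args have been
 * consumed.
 */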
static void sm_init(uint level) {
    status_t err;
    char name[32];

    mutex_acquire(&boot_args_lock);

    /* Map the boot arguments if supplied by the bootloader */
    if (lk_boot_args[1] && lk_boot_args[2]) {
        ulong offset = lk_boot_args[1] & (PAGE_SIZE - 1);
        paddr_t paddr = round_down(lk_boot_args[1], PAGE_SIZE);
        size_t size = round_up(lk_boot_args[2] + offset, PAGE_SIZE);
        void* vptr;

        err = vmm_alloc_physical(vmm_get_kernel_aspace(), "sm", size, &vptr,
                                 PAGE_SIZE_SHIFT, paddr, 0,
                                 ARCH_MMU_FLAG_NS |
                                         ARCH_MMU_FLAG_PERM_NO_EXECUTE |
                                         ARCH_MMU_FLAG_CACHED);
        if (!err) {
            boot_args = (uint8_t*)vptr + offset;
            boot_args_refcnt++;
        } else {
            boot_args = NULL;
            TRACEF("Error mapping boot parameter block: %d\n", err);
        }
    }

    mutex_release(&boot_args_lock);

    for (int cpu = 0; cpu < SMP_MAX_CPUS; cpu++) {
        event_init(&nsirqevent[cpu], false, EVENT_FLAG_AUTOUNSIGNAL);

        snprintf(name, sizeof(name), "irq-ns-switch-%d", cpu);
        nsirqthreads[cpu] =
                thread_create(name, sm_irq_loop, (void*)(uintptr_t)cpu,
                              HIGHEST_PRIORITY, DEFAULT_STACK_SIZE);
        if (!nsirqthreads[cpu]) {
            panic("failed to create irq NS switcher thread for cpu %d!\n", cpu);
        }
        thread_set_pinned_cpu(nsirqthreads[cpu], cpu);
        thread_set_real_time(nsirqthreads[cpu]);

        snprintf(name, sizeof(name), "idle-ns-switch-%d", cpu);
        nsidlethreads[cpu] =
                thread_create(name, sm_wait_for_smcall, NULL,
                              LOWEST_PRIORITY + 1, DEFAULT_STACK_SIZE);
        if (!nsidlethreads[cpu]) {
            panic("failed to create idle NS switcher thread for cpu %d!\n",
                  cpu);
        }
        thread_set_pinned_cpu(nsidlethreads[cpu], cpu);
        thread_set_real_time(nsidlethreads[cpu]);
    }

    stdcallthread = thread_create("sm-stdcall", sm_stdcall_loop, NULL,
                                  LOWEST_PRIORITY + 2, DEFAULT_STACK_SIZE);
    if (!stdcallthread) {
        panic("failed to create sm-stdcall thread!\n");
    }
    thread_set_real_time(stdcallthread);
    thread_resume(stdcallthread);
}

LK_INIT_HOOK(libsm, sm_init, LK_INIT_LEVEL_PLATFORM - 1);

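/*
 * sm_handle_irq: called when a non-secure interrupt fires while the secure
 * side is running. Wakes this cpu's irq-ns-switch thread, or returns to the
 * non-secure world directly if that thread is not ready yet.
 */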
enum handler_return sm_handle_irq(void) {
    int cpu = arch_curr_cpu_num();
    if (irq_thread_ready[cpu]) {
        event_signal(&nsirqevent[cpu], false);
    } else {
        TRACEF("warning: got ns irq before irq thread is ready\n");
        sm_irq_return_ns();
    }

    return INT_RESCHEDULE;
}

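/*
 * sm_handle_fiq: hand a secure FIQ event to the non-secure world and insist
 * that it comes back with the matching restart SMC (SMC_SC_RESTART_FIQ on
 * API >= TRUSTY_API_VERSION_RESTART_FIQ, SMC_SC_RESTART_LAST otherwise).
 * Any other SMC in between is rejected with SM_ERR_INTERLEAVED_SMC.
 */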
void sm_handle_fiq(void) {
    uint32_t expected_return;
    struct smc32_args args = SMC32_ARGS_INITIAL_VALUE(args);
    if (sm_check_and_lock_api_version(TRUSTY_API_VERSION_RESTART_FIQ)) {
        sm_sched_nonsecure_fiq_loop(SM_ERR_FIQ_INTERRUPTED, &args);
        expected_return = SMC_SC_RESTART_FIQ;
    } else {
        sm_sched_nonsecure_fiq_loop(SM_ERR_INTERRUPTED, &args);
        expected_return = SMC_SC_RESTART_LAST;
    }
    if (args.smc_nr != expected_return) {
        TRACEF("got bad restart smc %x, expected %x\n", args.smc_nr,
               expected_return);
        while (args.smc_nr != expected_return)
            sm_sched_nonsecure_fiq_loop(SM_ERR_INTERLEAVED_SMC, &args);
    }
}

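/*
 * platform_halt: park the secure side after a panic or shutdown request.
 * The first caller marks the platform halted and wakes the irq threads so
 * other cpus notice; every cpu then stays in sm_sched_nonsecure_fiq_loop
 * answering all further SMCs with SM_ERR_PANIC. This function never returns.
 */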
void platform_halt(platform_halt_action suggested_action,
                   platform_halt_reason reason) {
    bool already_halted;
    struct smc32_args args = SMC32_ARGS_INITIAL_VALUE(args);

    arch_disable_ints();
    already_halted = atomic_exchange(&platform_halted, true);
    if (!already_halted) {
        for (int cpu = 0; cpu < SMP_MAX_CPUS; cpu++) {
            if (nsirqthreads[cpu]) {
                event_signal(&nsirqevent[cpu], false);
            }
        }
        dprintf(ALWAYS, "%s\n", lk_version);
        dprintf(ALWAYS, "HALT: (reason = %d)\n", reason);
    }

#if ARCH_HAS_FIQ
    arch_disable_fiqs();
#endif
    while (true)
        sm_sched_nonsecure_fiq_loop(SM_ERR_PANIC, &args);
}

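/*
 * sm_get_boot_args / sm_put_boot_args: refcounted access to the boot
 * parameter block mapped in sm_init. Every successful get must be balanced
 * by a put; dropping the last reference unmaps the block and lets the NS
 * switcher threads start running.
 */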
status_t sm_get_boot_args(void** boot_argsp, size_t* args_sizep) {
    status_t err = NO_ERROR;

    if (!boot_argsp || !args_sizep)
        return ERR_INVALID_ARGS;

    mutex_acquire(&boot_args_lock);

    if (!boot_args) {
        err = ERR_NOT_CONFIGURED;
        goto unlock;
    }

    boot_args_refcnt++;
    *boot_argsp = boot_args;
    *args_sizep = lk_boot_args[2];
unlock:
    mutex_release(&boot_args_lock);
    return err;
}

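/* Unblock the per-cpu NS switcher threads created (suspended) in sm_init. */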
static void resume_nsthreads(void) {
    int i;

    for (i = 0; i < SMP_MAX_CPUS; i++) {
        DEBUG_ASSERT(nsirqthreads[i]);
        DEBUG_ASSERT(nsidlethreads[i]);

        thread_resume(nsirqthreads[i]);
        thread_resume(nsidlethreads[i]);
    }
}

void sm_put_boot_args(void) {
    mutex_acquire(&boot_args_lock);

    if (!boot_args) {
        TRACEF("WARNING: caller does not own "
               "a reference to boot parameters\n");
        goto unlock;
    }

    boot_args_refcnt--;
    if (boot_args_refcnt == 0) {
        vmm_free_region(vmm_get_kernel_aspace(), (vaddr_t)boot_args);
        boot_args = NULL;
        resume_nsthreads();
    }
unlock:
    mutex_release(&boot_args_lock);
}

static void sm_release_boot_args(uint level) {
    if (boot_args) {
        sm_put_boot_args();
    } else {
        /* we need to resume the ns-switcher here if
         * the boot loader didn't pass bootargs
         */
        resume_nsthreads();
    }

    if (boot_args)
        TRACEF("WARNING: outstanding reference to boot args "
               "at the end of initialization!\n");
}

LK_INIT_HOOK(libsm_bootargs, sm_release_boot_args, LK_INIT_LEVEL_LAST);