xref: /aosp_15_r20/trusty/kernel/lib/trusty/trusty_app.c (revision 344aa361028b423587d4ef3fa52a23d194628137)
/*
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved
 * Copyright (c) 2013, Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <lib/backtrace/backtrace.h>
#include <lib/trusty/elf.h>
#include <lib/trusty/trusty_app.h>

#include <arch.h>
#include <assert.h>
#include <compiler.h>
#include <debug.h>
#include <err.h>
#include <inttypes.h>
#include <kernel/event.h>
#include <kernel/mutex.h>
#include <kernel/thread.h>
#include <lib/app_manifest/app_manifest.h>
#include <lib/rand/rand.h>
#include <lib/trusty/ipc.h>
#include <lk/init.h>
#include <malloc.h>
#include <platform.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>
#include <uapi/mm.h>
#include <version.h>

#define LOCAL_TRACE 0

#define NS2MS_CEIL(ns) DIV_ROUND_UP(ns, 1000000ULL)

#define DEFAULT_MGMT_FLAGS APP_MANIFEST_MGMT_FLAGS_NONE

#define TRUSTY_APP_RESTART_TIMEOUT_SUCCESS (10ULL * 1000ULL * 1000ULL)
#define TRUSTY_APP_RESTART_TIMEOUT_FAILURE (5ULL * 1000ULL * 1000ULL * 1000ULL)

#ifdef TRUSTY_APP_STACK_TOP
#error "TRUSTY_APP_STACK_TOP is no longer respected"
#endif

/* Don't allow NULL to be a valid userspace address */
STATIC_ASSERT(USER_ASPACE_BASE != 0);

#ifndef DEFAULT_HEAP_SIZE
#define DEFAULT_HEAP_SIZE (4 * PAGE_SIZE)
#endif

#define PAGE_MASK (PAGE_SIZE - 1)

#undef ELF_64BIT
#if !IS_64BIT || USER_32BIT
#define ELF_64BIT 0
#else
#define ELF_64BIT 1
#endif

#if ELF_64BIT
#define ELF_NHDR Elf64_Nhdr
#define ELF_SHDR Elf64_Shdr
#define ELF_EHDR Elf64_Ehdr
#define ELF_PHDR Elf64_Phdr
#define Elf_Addr Elf64_Addr
#define Elf_Off Elf64_Off
#define Elf_Word Elf64_Word

#define PRIxELF_Off PRIx64
#define PRIuELF_Size PRIu64
#define PRIxELF_Size PRIx64
#define PRIxELF_Addr PRIx64
#define PRIxELF_Flags PRIx64
#else
#define ELF_NHDR Elf32_Nhdr
#define ELF_SHDR Elf32_Shdr
#define ELF_EHDR Elf32_Ehdr
#define ELF_PHDR Elf32_Phdr
#define Elf_Addr Elf32_Addr
#define Elf_Off Elf32_Off
#define Elf_Word Elf32_Word

#define PRIxELF_Off PRIx32
#define PRIuELF_Size PRIu32
#define PRIxELF_Size PRIx32
#define PRIxELF_Addr PRIx32
#define PRIxELF_Flags PRIx32
#endif

static u_int trusty_next_app_id;
static struct list_node trusty_app_list = LIST_INITIAL_VALUE(trusty_app_list);

struct trusty_builtin_app_img {
    intptr_t manifest_start;
    intptr_t manifest_end;
    intptr_t img_start;
    intptr_t img_end;
};

/* These symbols are linker defined and are declared as unsized arrays to
 * prevent compiler(clang) optimizations that break when the list is empty and
 * the symbols alias
 */
extern struct trusty_builtin_app_img __trusty_app_list_start[];
extern struct trusty_builtin_app_img __trusty_app_list_end[];

static bool apps_started;
static mutex_t apps_lock = MUTEX_INITIAL_VALUE(apps_lock);
static struct list_node app_notifier_list =
        LIST_INITIAL_VALUE(app_notifier_list);
uint als_slot_cnt;
static event_t app_mgr_event =
        EVENT_INITIAL_VALUE(app_mgr_event, 0, EVENT_FLAG_AUTOUNSIGNAL);

static struct list_node allowed_mmio_ranges_list =
        LIST_INITIAL_VALUE(allowed_mmio_ranges_list);

#define PRINT_TRUSTY_APP_UUID(level, tid, u)                                                    \
    dprintf((level),                                                                            \
            "trusty_app %d uuid: 0x%08x 0x%04x 0x%04x 0x%02x%02x 0x%02x%02x%02x%02x%02x%02x\n", \
            tid, (u)->time_low, (u)->time_mid, (u)->time_hi_and_version,                        \
            (u)->clock_seq_and_node[0], (u)->clock_seq_and_node[1],                             \
            (u)->clock_seq_and_node[2], (u)->clock_seq_and_node[3],                             \
            (u)->clock_seq_and_node[4], (u)->clock_seq_and_node[5],                             \
            (u)->clock_seq_and_node[6], (u)->clock_seq_and_node[7]);

static bool address_range_within_bounds(const void* range_start,
                                        size_t range_size,
                                        const void* lower_bound,
                                        const void* upper_bound) {
    const void* range_end = range_start + range_size;

    if (upper_bound < lower_bound) {
        LTRACEF("upper bound(%p) is below lower bound(%p)\n", upper_bound,
                lower_bound);
        return false;
    }

    if (range_end < range_start) {
        LTRACEF("Range overflows. start:%p size:%zd end:%p\n", range_start,
                range_size, range_end);
        return false;
    }

    if (range_start < lower_bound) {
        LTRACEF("Range starts(%p) before lower bound(%p)\n", range_start,
                lower_bound);
        return false;
    }

    if (range_end > upper_bound) {
        LTRACEF("Range ends(%p) past upper bound(%p)\n", range_end,
                upper_bound);
        return false;
    }

    return true;
}

static inline bool address_range_within_img(
        const void* range_start,
        size_t range_size,
        const struct trusty_app_img* appimg) {
    return address_range_within_bounds(range_start, range_size,
                                       (const void*)appimg->img_start,
                                       (const void*)appimg->img_end);
}

void trusty_app_allow_mmio_range(struct trusty_app_mmio_allowed_range* range) {
    DEBUG_ASSERT(range);

    if (!range->size) {
        dprintf(CRITICAL, "Allowed mmio range is empty\n");
        return;
    }

    mutex_acquire(&apps_lock);
    list_add_tail(&allowed_mmio_ranges_list, &range->node);
    mutex_release(&apps_lock);
}

/**
 * app_mmio_is_allowed() - Check whether an app is allowed to map a given
 *                         physical memory range.
 * @trusty_app: The application to check.
 * @mmio_start: The start of the physical memory range to map.
 * @mmio_size:  The size of the physical memory range.
 *
 * For security reasons, we do not want to allow any loadable app to map
 * physical memory by default. However, some specific apps need to
 * map device memory, so we maintain an allowlist of per-app ranges that
 * can be mapped. This function checks a given physical memory range for the
 * loadable app at @trusty_app against the allowlist. Each project can add its
 * own allowlist entries using trusty_app_allow_mmio_range().
 */
static bool app_mmio_is_allowed(struct trusty_app* trusty_app,
                                paddr_t mmio_start,
                                size_t mmio_size) {
    if (!(trusty_app->flags & APP_FLAGS_LOADABLE)) {
        return true;
    }

    DEBUG_ASSERT(mmio_size);
    DEBUG_ASSERT(is_mutex_held(&apps_lock));

    paddr_t mmio_end = mmio_start + (mmio_size - 1);
    const struct trusty_app_mmio_allowed_range* range;
    list_for_every_entry(&allowed_mmio_ranges_list, range,
                         struct trusty_app_mmio_allowed_range, node) {
        DEBUG_ASSERT(range->size);
        paddr_t range_end = range->start + (range->size - 1);
        if (!memcmp(&range->uuid, &trusty_app->props.uuid, sizeof(uuid_t)) &&
            mmio_start >= range->start && mmio_end <= range_end) {
            return true;
        }
    }

    return false;
}

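/*
 * Example: a minimal sketch (not built) of how a project could allowlist
 * an mmio range for a loadable app from a platform init hook. The uuid,
 * base address and init level below are made-up placeholder values.
 */
#if 0
static struct trusty_app_mmio_allowed_range example_mmio_range = {
        .node = LIST_INITIAL_CLEARED_VALUE,
        .uuid = {0x12345678,
                 0x9abc,
                 0xdef0,
                 {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77}},
        .start = 0x80000000, /* hypothetical device base, page aligned */
        .size = PAGE_SIZE,
};

static void example_allow_mmio(uint level) {
    /* must run before the app is loaded so the entry is found above */
    trusty_app_allow_mmio_range(&example_mmio_range);
}

LK_INIT_HOOK(example_allow_mmio, example_allow_mmio, LK_INIT_LEVEL_APPS - 1);
#endif
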
/**
 * struct trusty_app_dma_allowed_range - Prepared dma range for trusty app.
 * @node:  Internal list node, should be initialized to
 *         %LIST_INITIAL_CLEARED_VALUE
 * @slice: Represents a virtual memory range prepared for dma. Can be used to
 *         get the physical pages used for dma
 * @vaddr: Virtual memory address. Used to tear down all mappings from a single
 *         call to prepare_dma
 * @flags: Flags used to map dma range
 */
struct trusty_app_dma_allowed_range {
    struct list_node node;
    struct vmm_obj_slice slice;
    vaddr_t vaddr;
    uint32_t flags;
};

status_t trusty_app_allow_dma_range(struct trusty_app* app,
                                    struct vmm_obj* obj,
                                    size_t offset,
                                    size_t size,
                                    vaddr_t vaddr,
                                    uint32_t flags) {
    DEBUG_ASSERT(obj);
    DEBUG_ASSERT(app);
    DEBUG_ASSERT(size);

    /* check that dma range hasn't already been mapped at vaddr */
    const struct trusty_app_dma_allowed_range* check_range;
    list_for_every_entry(&app->props.dma_entry_list, check_range,
                         struct trusty_app_dma_allowed_range, node) {
        if (check_range->vaddr == vaddr)
            return ERR_INVALID_ARGS;
    }

    struct trusty_app_dma_allowed_range* range_list_entry =
            (struct trusty_app_dma_allowed_range*)calloc(
                    1, sizeof(struct trusty_app_dma_allowed_range));
    if (!range_list_entry) {
        return ERR_NO_MEMORY;
    }
    /* range_list_entry->node is already zero-initialized */
    vmm_obj_slice_init(&range_list_entry->slice);
    vmm_obj_slice_bind(&range_list_entry->slice, obj, offset, size);
    range_list_entry->vaddr = vaddr;
    range_list_entry->flags = flags;

    mutex_acquire(&apps_lock);
    list_add_tail(&app->props.dma_entry_list, &range_list_entry->node);
    mutex_release(&apps_lock);

    return NO_ERROR;
}

status_t trusty_app_destroy_dma_range(vaddr_t vaddr, size_t size) {
    status_t ret = ERR_NOT_FOUND;
    struct trusty_app_dma_allowed_range* range;
    struct trusty_app_dma_allowed_range* next_range;
    struct trusty_app* app = current_trusty_app();

    mutex_acquire(&apps_lock);
    list_for_every_entry_safe(&app->props.dma_entry_list, range, next_range,
                              struct trusty_app_dma_allowed_range, node) {
        DEBUG_ASSERT(range->slice.size);
        if (range->vaddr == vaddr && range->slice.size == size) {
            list_delete(&range->node);
            vmm_obj_slice_release(&range->slice);
            free(range);
            ret = NO_ERROR;
            break;
        }
    }

    mutex_release(&apps_lock);

    return ret;
}

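/*
 * Example: a minimal sketch (not built) of the dma-range lifecycle. It
 * assumes the caller already mapped a vmm_obj at `uva` in the app's
 * address space and runs in that app's context, since
 * trusty_app_destroy_dma_range() looks up current_trusty_app().
 */
#if 0
static status_t example_dma_lifecycle(struct trusty_app* app,
                                      struct vmm_obj* obj,
                                      vaddr_t uva,
                                      size_t size) {
    /* record the range so later paddr checks against it can succeed */
    status_t rc = trusty_app_allow_dma_range(app, obj, 0, size, uva,
                                             DMA_FLAG_MULTI_PMEM);
    if (rc != NO_ERROR)
        return rc;

    /*
     * ... device performs dma; trusty_app_dma_is_allowed(app, paddr) now
     * returns true for any page backing [uva, uva + size) ...
     */

    /* teardown must pass the same vaddr/size used when allowing */
    return trusty_app_destroy_dma_range(uva, size);
}
#endif
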
/* Must be called with the apps_lock held */
static bool trusty_app_dma_is_allowed_locked(const struct trusty_app* app,
                                             paddr_t paddr) {
    int ret;
    size_t offset;
    const struct trusty_app_dma_allowed_range* range;

    DEBUG_ASSERT(app);
    DEBUG_ASSERT(is_mutex_held(&apps_lock));
    list_for_every_entry(&app->props.dma_entry_list, range,
                         struct trusty_app_dma_allowed_range, node) {
        DEBUG_ASSERT(range->slice.size);
        offset = 0;
        do {
            paddr_t prepared_paddr;
            size_t prepared_paddr_size;
            ret = range->slice.obj->ops->get_page(
                    range->slice.obj, range->slice.offset + offset,
                    &prepared_paddr, &prepared_paddr_size);
            if (ret != NO_ERROR) {
                TRACEF("failed to get pages for paddr 0x%" PRIxPADDR "\n",
                       paddr);
                return false;
            }
            paddr_t prepared_paddr_end =
                    prepared_paddr + (prepared_paddr_size - 1);
            if (paddr >= prepared_paddr && paddr <= prepared_paddr_end) {
                return true;
            }
            offset += MIN(range->slice.size - offset, prepared_paddr_size);
        } while (offset < range->slice.size &&
                 (range->flags & DMA_FLAG_MULTI_PMEM));
    }

    TRACEF("paddr 0x%" PRIxPADDR " is not valid for dma\n", paddr);
    return false;
}

bool trusty_app_dma_is_allowed(const struct trusty_app* app, paddr_t paddr) {
    bool res;
    mutex_acquire(&apps_lock);
    res = trusty_app_dma_is_allowed_locked(app, paddr);
    mutex_release(&apps_lock);
    return res;
}

static void finalize_registration(void) {
    mutex_acquire(&apps_lock);
    apps_started = true;
    mutex_release(&apps_lock);
}

status_t trusty_register_app_notifier(struct trusty_app_notifier* n) {
    status_t ret = NO_ERROR;

    mutex_acquire(&apps_lock);
    if (!apps_started)
        list_add_tail(&app_notifier_list, &n->node);
    else
        ret = ERR_ALREADY_STARTED;
    mutex_release(&apps_lock);
    return ret;
}

int trusty_als_alloc_slot(void) {
    int ret;

    mutex_acquire(&apps_lock);
    if (!apps_started)
        ret = ++als_slot_cnt;
    else
        ret = ERR_ALREADY_STARTED;
    mutex_release(&apps_lock);
    return ret;
}

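/*
 * Example: a minimal sketch (not built) of an app-local-storage user.
 * It assumes the trusty_app_notifier startup callback and the
 * trusty_als_set() helper declared in trusty_app.h; names prefixed
 * "example_" are placeholders.
 */
#if 0
static int example_slot;

static status_t example_startup(struct trusty_app* app) {
    void* state = calloc(1, sizeof(uint32_t));
    if (!state)
        return ERR_NO_MEMORY;
    trusty_als_set(app, example_slot, state);
    return NO_ERROR;
}

static struct trusty_app_notifier example_notifier = {
        .startup = example_startup,
};

static void example_als_init(uint level) {
    /* both calls fail with ERR_ALREADY_STARTED once apps are running */
    example_slot = trusty_als_alloc_slot();
    ASSERT(example_slot > 0);
    ASSERT(trusty_register_app_notifier(&example_notifier) == NO_ERROR);
}

LK_INIT_HOOK(example_als, example_als_init, LK_INIT_LEVEL_APPS - 1);
#endif
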
#if ELF_64BIT
#define ENTER_USPACE_FLAGS 0
#else
#define ENTER_USPACE_FLAGS ARCH_ENTER_USPACE_FLAG_32BIT
#endif

/*
 * Allocate space on the user stack.
 */
static user_addr_t user_stack_alloc(struct trusty_thread* trusty_thread,
                                    user_size_t data_len,
                                    user_size_t align,
                                    user_addr_t* stack_ptr) {
    user_addr_t ptr = round_down(*stack_ptr - data_len, align);
    if (ptr < trusty_thread->stack_start - trusty_thread->stack_size) {
        panic("stack underflow while initializing user space\n");
    }
    *stack_ptr = ptr;
    return ptr;
}

/*
 * Copy data to a preallocated spot on the user stack. This should not fail.
 */
static void copy_to_user_stack(user_addr_t dst_ptr,
                               const void* data,
                               user_size_t data_len) {
    int ret = copy_to_user(dst_ptr, data, data_len);
    if (ret) {
        panic("copy_to_user failed %d\n", ret);
    }
}

/*
 * Allocate space on the user stack and fill it with data.
 */
static user_addr_t add_to_user_stack(struct trusty_thread* trusty_thread,
                                     const void* data,
                                     user_size_t data_len,
                                     user_size_t align,
                                     user_addr_t* stack_ptr) {
    user_addr_t ptr =
            user_stack_alloc(trusty_thread, data_len, align, stack_ptr);
    copy_to_user_stack(ptr, data, data_len);
    return ptr;
}

/* TODO share a common header file. */
#define AT_PAGESZ 6
#define AT_BASE 7
#define AT_RANDOM 25
#define AT_HWCAP2 26
#define HWCAP2_MTE (1 << 18)

/*
 * Pass data to libc on the user stack.
 * Prevent inlining so that the stack allocations inside this function don't get
 * trapped on the kernel stack.
 */
static __NO_INLINE user_addr_t
trusty_thread_write_elf_tables(struct trusty_thread* trusty_thread,
                               user_addr_t* stack_ptr,
                               vaddr_t load_bias) {
    /* Construct the elf tables in reverse order - the stack grows down. */

    /*
     * sixteen random bytes
     */
    uint8_t rand_bytes[16] = {0};
    rand_get_bytes(rand_bytes, sizeof(rand_bytes));
    user_addr_t rand_bytes_addr = add_to_user_stack(
            trusty_thread, rand_bytes, sizeof(rand_bytes), 1, stack_ptr);

    const char* app_name = trusty_thread->app->props.app_name;
    user_addr_t app_name_addr =
            add_to_user_stack(trusty_thread, app_name, strlen(app_name) + 1,
                              sizeof(user_addr_t), stack_ptr);

    bool mte = arch_tagging_enabled();
    /* auxv */
    user_addr_t auxv[] = {
            AT_PAGESZ, PAGE_SIZE,       AT_BASE,   load_bias,
            AT_RANDOM, rand_bytes_addr, AT_HWCAP2, mte ? HWCAP2_MTE : 0,
            0};
    add_to_user_stack(trusty_thread, auxv, sizeof(auxv), sizeof(user_addr_t),
                      stack_ptr);

    /* envp - for layout compatibility, unused */
    user_addr_t envp[] = {
            0,
    };
    add_to_user_stack(trusty_thread, envp, sizeof(envp), sizeof(user_addr_t),
                      stack_ptr);

    /* argv. Only argv[0] and argv[1] (terminator) are set. */
    user_addr_t argv[] = {
            app_name_addr,
            0,
    };
    add_to_user_stack(trusty_thread, argv, sizeof(argv), sizeof(user_addr_t),
                      stack_ptr);

    /* argc. The null terminator is not counted. */
    user_addr_t argc = countof(argv) - 1;
    user_addr_t argc_ptr = add_to_user_stack(trusty_thread, &argc, sizeof(argc),
                                             sizeof(user_addr_t), stack_ptr);

    return argc_ptr;
}

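/*
 * For reference, after trusty_thread_write_elf_tables() the new thread's
 * user stack (growing down from stack_start) looks like:
 *
 *   higher addresses
 *     [16 random bytes]        <- AT_RANDOM auxv entry points here
 *     [app name string]        <- argv[0] points here
 *     [auxv: AT_PAGESZ, AT_BASE, AT_RANDOM, AT_HWCAP2, 0]
 *     [envp: 0]
 *     [argv: app_name_addr, 0]
 *     [argc = 1]               <- returned pointer, handed to libc
 *   lower addresses
 */
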
static int trusty_thread_startup(void* arg) {
    struct trusty_thread* trusty_thread = current_trusty_thread();

    vmm_set_active_aspace(trusty_thread->app->aspace);

    user_addr_t stack_ptr = trusty_thread->stack_start;
    user_addr_t elf_tables = trusty_thread_write_elf_tables(
            trusty_thread, &stack_ptr, trusty_thread->app->load_bias);

    user_addr_t shadow_stack_base = 0;
#if USER_SCS_SUPPORTED
    shadow_stack_base = trusty_thread->shadow_stack_base;
#endif

    arch_enter_uspace(trusty_thread->entry, stack_ptr, shadow_stack_base,
                      ENTER_USPACE_FLAGS, elf_tables);

    __UNREACHABLE;
}

static status_t trusty_thread_start(struct trusty_thread* trusty_thread) {
    DEBUG_ASSERT(trusty_thread && trusty_thread->thread);

    return thread_resume(trusty_thread->thread);
}

void __NO_RETURN trusty_thread_exit(int retcode) {
    struct trusty_thread* trusty_thread = current_trusty_thread();
    vaddr_t stack_bot;

    ASSERT(trusty_thread);

    stack_bot = trusty_thread->stack_start - trusty_thread->stack_size;

    vmm_free_region(trusty_thread->app->aspace, stack_bot);

#if USER_SCS_SUPPORTED
    if (trusty_thread->shadow_stack_base) {
        /*
         * revert the adjustment of shadow_stack_base to reconstruct pointer
         * returned by vmm_alloc.
         */
        size_t size = trusty_thread->shadow_stack_size;
        size_t adjustment = round_up(size, PAGE_SIZE) - size;
        vmm_free_region(trusty_thread->app->aspace,
                        trusty_thread->shadow_stack_base - adjustment);
    } else {
        DEBUG_ASSERT(trusty_thread->app->props.min_shadow_stack_size == 0);
    }
#endif

    thread_exit(retcode);
}

static struct trusty_thread* trusty_thread_create(
        const char* name,
        vaddr_t entry,
        int priority,
        size_t stack_size,
        size_t shadow_stack_size,
        struct trusty_app* trusty_app) {
    struct trusty_thread* trusty_thread;
    status_t err;
    vaddr_t stack_bot = 0;
    stack_size = round_up(stack_size, PAGE_SIZE);

    trusty_thread = calloc(1, sizeof(struct trusty_thread));
    if (!trusty_thread)
        return NULL;

    err = vmm_alloc(trusty_app->aspace, "stack", stack_size, (void**)&stack_bot,
                    PAGE_SIZE_SHIFT, 0,
                    ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (err != NO_ERROR) {
        dprintf(CRITICAL,
                "failed(%d) to create thread stack(0x%" PRIxVADDR
                ") for app %u, %s\n",
                err, stack_bot, trusty_app->app_id, trusty_app->props.app_name);
        goto err_stack;
    }

#if USER_SCS_SUPPORTED
    vaddr_t shadow_stack_base = 0;
    if (shadow_stack_size) {
        err = vmm_alloc(
                trusty_app->aspace, "shadow stack", shadow_stack_size,
                (void**)&shadow_stack_base, PAGE_SIZE_SHIFT, 0,
                ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
        if (err != NO_ERROR) {
            dprintf(CRITICAL,
                    "failed(%d) to allocate shadow stack(0x%" PRIxVADDR
                    ") for app %u\n",
                    err, shadow_stack_base, trusty_app->app_id);
            goto err_shadow_stack;
        }
    }
#endif

    trusty_thread->thread = thread_create(name, trusty_thread_startup, NULL,
                                          priority, DEFAULT_STACK_SIZE);
    if (!trusty_thread->thread)
        goto err_thread;

    trusty_thread->app = trusty_app;
    trusty_thread->entry = entry;
    trusty_thread->stack_start = stack_bot + stack_size; /* stack grows down */
    trusty_thread->stack_size = stack_size;
#if USER_SCS_SUPPORTED
    /* make shadow stack hit guard page if too small */
    size_t adjustment =
            round_up(shadow_stack_size, PAGE_SIZE) - shadow_stack_size;

    /* we only make an adjustment if the app has shadow call stacks enabled */
    DEBUG_ASSERT(shadow_stack_size > 0 || adjustment == 0);

    /* shadow stack grows up */
    trusty_thread->shadow_stack_base = shadow_stack_base + adjustment;
    trusty_thread->shadow_stack_size = shadow_stack_size;
#endif
    thread_tls_set(trusty_thread->thread, TLS_ENTRY_TRUSTY,
                   (uintptr_t)trusty_thread);

    int pinned_cpu = trusty_app->props.pinned_cpu;
    if (pinned_cpu != APP_MANIFEST_PINNED_CPU_NONE) {
        thread_set_pinned_cpu(trusty_thread->thread, pinned_cpu);
        dprintf(SPEW, "trusty_app %d, %s pinned to CPU: %u\n",
                trusty_app->app_id, trusty_app->props.app_name, pinned_cpu);
    }

    return trusty_thread;

err_thread:
#if USER_SCS_SUPPORTED
    if (shadow_stack_size) {
        vmm_free_region(trusty_app->aspace, shadow_stack_base);
    }
err_shadow_stack:
#endif
    vmm_free_region(trusty_app->aspace, stack_bot);
err_stack:
    free(trusty_thread);
    return NULL;
}

/* Must be called with the apps_lock held */
static struct manifest_port_entry* find_manifest_port_entry_locked(
        const char* port_path,
        struct trusty_app** app_out) {
    struct trusty_app* app;
    struct manifest_port_entry* entry;

    DEBUG_ASSERT(is_mutex_held(&apps_lock));

    list_for_every_entry(&trusty_app_list, app, struct trusty_app, node) {
        list_for_every_entry(&app->props.port_entry_list, entry,
                             struct manifest_port_entry, node) {
            if (!strncmp(port_path, entry->path, entry->path_len)) {
                if (app_out)
                    *app_out = app;

                return entry;
            }
        }
    }

    return NULL;
}
/* Must be called with the apps_lock held */
static struct trusty_app* trusty_app_find_by_uuid_locked(uuid_t* uuid) {
    struct trusty_app* app;

    DEBUG_ASSERT(is_mutex_held(&apps_lock));

    list_for_every_entry(&trusty_app_list, app, struct trusty_app, node) {
        if (!memcmp(&app->props.uuid, uuid, sizeof(uuid_t)))
            return app;
    }

    return NULL;
}

bool trusty_uuid_dma_is_allowed(const struct uuid* uuid, paddr_t paddr) {
    bool res;
    const struct trusty_app* app;
    mutex_acquire(&apps_lock);
    app = trusty_app_find_by_uuid_locked((struct uuid*)uuid);
    res = trusty_app_dma_is_allowed_locked(app, paddr);
    mutex_release(&apps_lock);
    return res;
}

static status_t get_app_manifest_config_data(struct trusty_app* trusty_app,
                                             char** manifest_data,
                                             size_t* size) {
    struct trusty_app_img* app_img;

    app_img = &trusty_app->app_img;
    if (!app_img->manifest_start) {
        dprintf(CRITICAL, "manifest section header not found\n");
        return ERR_NOT_VALID;
    }

    /* manifest data is embedded in kernel */
    dprintf(SPEW,
            "trusty app manifest: start %p size 0x%08" PRIxPTR " end %p\n",
            (void*)app_img->manifest_start,
            app_img->manifest_end - app_img->manifest_start,
            (void*)app_img->manifest_end);

    *size = app_img->manifest_end - app_img->manifest_start;
    *manifest_data = (char*)app_img->manifest_start;

    return NO_ERROR;
}

static void destroy_app_phys_mem(struct phys_mem_obj* obj) {
    struct manifest_mmio_entry* mmio_entry;
    mmio_entry = containerof(obj, struct manifest_mmio_entry, phys_mem_obj);
    assert(!list_in_list(&mmio_entry->node));
    free(mmio_entry);
}

/**
 * load_app_elf_gnu_property_array() - Load app properties from ELF GNU property
 * array.
 * @trusty_app:  Trusty application, both giving ELF section and props.
 * @offset:      Byte offset of the ELF GNU property array structure.
 * @length:      Length in bytes of the ELF GNU property array.
 *
 * Return: If nonzero, the ELF is malformed. Otherwise NO_ERROR.
 */
static status_t load_app_elf_gnu_property_array(struct trusty_app* trusty_app,
                                                Elf_Off offset,
                                                size_t length) {
    const void* elf_start = (void*)trusty_app->app_img.img_start;

    /* Check property array is within the ELF image */
    if (!address_range_within_img(elf_start + offset, length,
                                  &trusty_app->app_img)) {
        return ERR_NOT_VALID;
    }

    /* Walk through the variable length properties */
    while (length >= sizeof(ELF_GnuProp)) {
        const ELF_GnuProp* gp = elf_start + offset;
        Elf_Word gp_size = sizeof(ELF_GnuProp);

        /* Check header is within bounds */
        if (!address_range_within_img(gp, gp_size, &trusty_app->app_img)) {
            return ERR_NOT_VALID;
        }

        /* Update full size and round to either 4 or 8 byte alignment */
        gp_size += gp->pr_datasz;
        gp_size += sizeof(Elf_Word) - 1;
        gp_size &= ~(sizeof(Elf_Word) - 1);

        /* Check access to the full property */
        if (gp_size < sizeof(ELF_GnuProp) ||
            !address_range_within_img(gp, gp_size, &trusty_app->app_img)) {
            return ERR_NOT_VALID;
        }

#ifdef ARCH_ARM64
        /* TODO(mikemcternan): Split into an arch specific function */
        if (gp && gp->pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
            /* This property should always have a 32-bit value */
            if (gp->pr_datasz != sizeof(Elf32_Word)) {
                return ERR_NOT_VALID;
            }

            switch (gp->pr_data[0]) {
            case GNU_PROPERTY_AARCH64_FEATURE_1_BTI:
                trusty_app->props.feature_bti = true;
                break;
            default:
                break;
            }
        }
#endif
        if (length <= gp_size) {
            length = 0;
        } else {
            length -= gp_size;
            offset += gp_size;
        }
    }

    return NO_ERROR;
}

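/*
 * For reference, the PT_GNU_PROPERTY segment parsed above holds a single
 * ELF note whose descriptor is the property array, laid out roughly as:
 *
 *   ELF_NHDR { n_namesz = 4, n_descsz, n_type = NT_GNU_PROPERTY_TYPE_0 }
 *   "GNU\0"
 *   ELF_GnuProp { pr_type, pr_datasz, pr_data[] }   <- repeated, each
 *   ...                                                entry padded to
 *                                                      Elf_Word alignment
 *
 * e.g. an entry with pr_type GNU_PROPERTY_AARCH64_FEATURE_1_AND and a
 * 4-byte pr_data word with the BTI bit set marks the app as BTI-enabled.
 */
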
static status_t load_app_elf_options(struct trusty_app* trusty_app) {
    const struct trusty_app_img* app_img = &trusty_app->app_img;
    const ELF_EHDR* elf = (ELF_EHDR*)app_img->img_start;

    /* Iterate ELF program headers to find PT_GNU_PROPERTY section */
    for (int i = 0; i < elf->e_phnum; i++) {
        const ELF_PHDR* phdr =
                (const void*)elf + elf->e_phoff + (elf->e_phentsize * i);

        if (!address_range_within_img(phdr, sizeof(ELF_PHDR),
                                      &trusty_app->app_img)) {
            return ERR_NOT_VALID;
        }

        /* Check for a GNU property section */
        if (phdr->p_type == PT_GNU_PROPERTY) {
            const ELF_NHDR* nhdr = (const void*)elf + phdr->p_offset;
            const int nhdr_len = sizeof(ELF_NHDR) + sizeof("GNU");

            if (!address_range_within_img(nhdr, nhdr_len,
                                          &trusty_app->app_img)) {
                return ERR_NOT_VALID;
            }

            if (nhdr->n_namesz == sizeof("GNU") &&
                nhdr->n_type == NT_GNU_PROPERTY_TYPE_0 &&
                strcmp("GNU", (const char*)nhdr + sizeof(ELF_NHDR)) == 0) {
                const Elf_Off n_desc = phdr->p_offset + nhdr_len;

                status_t ret = load_app_elf_gnu_property_array(
                        trusty_app, n_desc, nhdr->n_descsz);
                if (ret != NO_ERROR) {
                    return ret;
                }
            }
        }
    }

    return NO_ERROR;
}

static status_t load_app_config_options(struct trusty_app* trusty_app) {
    char* manifest_data;
    size_t manifest_size;
    uint32_t mmio_arch_mmu_flags;
    uint64_t mmio_size;
    struct manifest_mmio_entry* mmio_entry;
    paddr_t tmp_paddr;
    status_t ret;
    struct manifest_port_entry* entry;
    struct app_manifest_iterator manifest_iter;
    struct app_manifest_config_entry manifest_entry;
    const char* unknown_app_name = "<unknown>";

    /* init default config options before parsing manifest */
    trusty_app->props.app_name = unknown_app_name;
    trusty_app->props.min_heap_size = DEFAULT_HEAP_SIZE;
    trusty_app->props.min_stack_size = DEFAULT_STACK_SIZE;
    /* binary manifest must specify the min shadow stack size */
    trusty_app->props.min_shadow_stack_size = 0;
    trusty_app->props.mgmt_flags = DEFAULT_MGMT_FLAGS;
    trusty_app->props.pinned_cpu = APP_MANIFEST_PINNED_CPU_NONE;
    trusty_app->props.priority = DEFAULT_PRIORITY;

    ret = load_app_elf_options(trusty_app);
    if (ret != NO_ERROR) {
        return ERR_NOT_VALID;
    }

    manifest_data = NULL;
    manifest_size = 0;
    ret = get_app_manifest_config_data(trusty_app, &manifest_data,
                                       &manifest_size);
    if (ret != NO_ERROR) {
        return ERR_NOT_VALID;
    }

    /*
     * Step thru configuration blob.
     *
     * Save off some configuration data while we are here but
     * defer processing of other data until it is needed later.
     */
    ret = app_manifest_iterator_reset(&manifest_iter, manifest_data,
                                      manifest_size);
    if (ret != NO_ERROR) {
        dprintf(CRITICAL, "error parsing manifest for app %u\n",
                trusty_app->app_id);
        return ret;
    }
    while (app_manifest_iterator_next(&manifest_iter, &manifest_entry, &ret)) {
        switch (manifest_entry.key) {
        case APP_MANIFEST_CONFIG_KEY_MIN_STACK_SIZE:
            trusty_app->props.min_stack_size =
                    manifest_entry.value.min_stack_size;
            if (trusty_app->props.min_stack_size == 0) {
                dprintf(CRITICAL,
                        "manifest MIN_STACK_SIZE is 0 of app %u, %s\n",
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }
            break;
        case APP_MANIFEST_CONFIG_KEY_MIN_HEAP_SIZE:
            trusty_app->props.min_heap_size =
                    manifest_entry.value.min_heap_size;
            break;
        case APP_MANIFEST_CONFIG_KEY_MAP_MEM:
            mmio_arch_mmu_flags = manifest_entry.value.mem_map.arch_mmu_flags;
            mmio_size = round_up(manifest_entry.value.mem_map.size, PAGE_SIZE);
            trusty_app->props.map_io_mem_cnt++;

            if (!IS_PAGE_ALIGNED(manifest_entry.value.mem_map.offset)) {
                dprintf(CRITICAL, "mmio_id %u not page aligned of app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            if ((paddr_t)manifest_entry.value.mem_map.offset !=
                        manifest_entry.value.mem_map.offset ||
                (size_t)mmio_size != mmio_size) {
                dprintf(CRITICAL,
                        "mmio_id %d address/size too large of app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            if (!mmio_size ||
                __builtin_add_overflow(manifest_entry.value.mem_map.offset,
                                       mmio_size - 1, &tmp_paddr)) {
                dprintf(CRITICAL, "mmio_id %u bad size of app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            if (manifest_entry.value.mem_map.arch_mmu_flags &
                        ~(ARCH_MMU_FLAG_CACHE_MASK | ARCH_MMU_FLAG_NS) ||
                ((manifest_entry.value.mem_map.arch_mmu_flags &
                  ARCH_MMU_FLAG_CACHE_MASK) != ARCH_MMU_FLAG_CACHED &&
                 (manifest_entry.value.mem_map.arch_mmu_flags &
                  ARCH_MMU_FLAG_CACHE_MASK) != ARCH_MMU_FLAG_UNCACHED &&
                 (manifest_entry.value.mem_map.arch_mmu_flags &
                  ARCH_MMU_FLAG_CACHE_MASK) != ARCH_MMU_FLAG_UNCACHED_DEVICE)) {
                dprintf(CRITICAL,
                        "mmio_id %u bad arch_mmu_flags 0x%x of app %u, %s\n",
                        manifest_entry.value.mem_map.id,
                        manifest_entry.value.mem_map.arch_mmu_flags,
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }
            mmio_arch_mmu_flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;

            if (!app_mmio_is_allowed(
                        trusty_app,
                        (paddr_t)manifest_entry.value.mem_map.offset,
                        mmio_size)) {
                dprintf(CRITICAL,
                        "mmio_id %u not allowed for loadable app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            mmio_entry = calloc(1, sizeof(struct manifest_mmio_entry));
            if (!mmio_entry) {
                dprintf(CRITICAL,
                        "Failed to allocate memory for manifest mmio %d of app %u, %s\n",
                        manifest_entry.value.mem_map.id, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NO_MEMORY;
            }

            phys_mem_obj_dynamic_initialize(&mmio_entry->phys_mem_obj,
                                            &mmio_entry->phys_mem_obj_self_ref,
                                            manifest_entry.value.mem_map.offset,
                                            mmio_size, mmio_arch_mmu_flags,
                                            destroy_app_phys_mem);
            mmio_entry->id = manifest_entry.value.mem_map.id;
            list_add_tail(&trusty_app->props.mmio_entry_list,
                          &mmio_entry->node);

            break;
        case APP_MANIFEST_CONFIG_KEY_MGMT_FLAGS:
            trusty_app->props.mgmt_flags = manifest_entry.value.mgmt_flags;
            break;
        case APP_MANIFEST_CONFIG_KEY_START_PORT:
            if (manifest_entry.value.start_port.name_size > IPC_PORT_PATH_MAX) {
                dprintf(CRITICAL,
                        "manifest port name %s too long:%#" PRIx32
                        " of app %u, %s\n",
                        manifest_entry.value.start_port.name,
                        manifest_entry.value.start_port.name_size,
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            entry = find_manifest_port_entry_locked(
                    manifest_entry.value.start_port.name, NULL);
            if (entry) {
                dprintf(CRITICAL, "Port %s is already registered\n",
                        manifest_entry.value.start_port.name);
                return ERR_ALREADY_EXISTS;
            }

            entry = calloc(1, sizeof(struct manifest_port_entry));
            if (!entry) {
                dprintf(CRITICAL,
                        "Failed to allocate memory for manifest port %s of app %u, %s\n",
                        manifest_entry.value.start_port.name,
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NO_MEMORY;
            }

            entry->flags = manifest_entry.value.start_port.flags;
            entry->path_len = manifest_entry.value.start_port.name_size;
            entry->path = manifest_entry.value.start_port.name;

            list_add_tail(&trusty_app->props.port_entry_list, &entry->node);

            break;
        case APP_MANIFEST_CONFIG_KEY_PINNED_CPU:
            if (manifest_entry.value.pinned_cpu >= SMP_MAX_CPUS) {
                dprintf(CRITICAL,
                        "pinned CPU index %u out of range, app %u, %s\n",
                        manifest_entry.value.pinned_cpu, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }

            trusty_app->props.pinned_cpu = manifest_entry.value.pinned_cpu;
            break;
        case APP_MANIFEST_CONFIG_KEY_PRIORITY:
            if (manifest_entry.value.priority < (LOWEST_PRIORITY + 2) ||
                manifest_entry.value.priority > (HIGHEST_PRIORITY - 1)) {
                dprintf(CRITICAL,
                        "priority value %u out of range, app %u, %s\n",
                        manifest_entry.value.priority, trusty_app->app_id,
                        trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }
            trusty_app->props.priority = manifest_entry.value.priority;
            break;
        case APP_MANIFEST_CONFIG_KEY_MIN_SHADOW_STACK_SIZE:
#if !USER_SCS_SUPPORTED
            if (manifest_entry.value.min_shadow_stack_size) {
                dprintf(CRITICAL,
                        "Shadow call stack requested by app %u, %s. Kernel "
                        "was not built to support user shadow call stacks\n",
                        trusty_app->app_id, trusty_app->props.app_name);
                return ERR_NOT_VALID;
            }
#endif
            trusty_app->props.min_shadow_stack_size =
                    manifest_entry.value.min_shadow_stack_size;
            /* min_shadow_stack_size == 0 means app opted out of shadow stack */
            break;
        case APP_MANIFEST_CONFIG_KEY_UUID:
            memcpy(&trusty_app->props.uuid, &manifest_entry.value.uuid,
                   sizeof(uuid_t));
            break;
        case APP_MANIFEST_CONFIG_KEY_APP_NAME:
            trusty_app->props.app_name = manifest_entry.value.app_name;
            break;
        case APP_MANIFEST_CONFIG_KEY_VERSION:
        case APP_MANIFEST_CONFIG_KEY_MIN_VERSION:
        case APP_MANIFEST_CONFIG_KEY_APPLOADER_FLAGS:
            /* Handled by apploader */
            break;
        }
    }
    if (ret != NO_ERROR) {
        dprintf(CRITICAL, "error parsing manifest for app %u\n",
                trusty_app->app_id);
        return ret;
    }
    if (trusty_app->props.app_name == unknown_app_name) {
        dprintf(CRITICAL, "app-name missing for app %u\n", trusty_app->app_id);
        return ERR_NOT_VALID;
    }

    if (trusty_app_find_by_uuid_locked(&trusty_app->props.uuid)) {
        PRINT_TRUSTY_APP_UUID(CRITICAL, trusty_app->app_id,
                              &trusty_app->props.uuid);
        dprintf(CRITICAL, "app already registered\n");
        return ERR_ALREADY_EXISTS;
    }

    PRINT_TRUSTY_APP_UUID(SPEW, trusty_app->app_id, &trusty_app->props.uuid);
    dprintf(SPEW, "trusty_app %u name: %s priority: %u\n", trusty_app->app_id,
            trusty_app->props.app_name, trusty_app->props.priority);

    if (trusty_app->props.feature_bti) {
        const char* status;
#ifndef USER_BTI_DISABLED
        status = arch_bti_supported() ? "enabled"
                                      : "ignored (unsupported by hw)";
#else
        status = "ignored (disabled in kernel)";
#endif
        dprintf(SPEW, "trusty_app %u  bti: %s\n", trusty_app->app_id, status);
    }

    LTRACEF("trusty_app %p: stack_sz=0x%x\n", trusty_app,
            trusty_app->props.min_stack_size);
    LTRACEF("trusty_app %p: heap_sz=0x%x\n", trusty_app,
            trusty_app->props.min_heap_size);
    LTRACEF("trusty_app %p: num_io_mem=%d\n", trusty_app,
            trusty_app->props.map_io_mem_cnt);

    return NO_ERROR;
}

static status_t init_brk(struct trusty_app* trusty_app) {
    status_t status;
    vaddr_t start_brk;
    vaddr_t brk_size;

    /*
     * Make sure the heap is page aligned and page sized.
     * Most user space allocators assume this. Historically, we tried to
     * scavenge space at the end of .bss for the heap but this misaligned the
     * heap and caused userspace allocators to behave in subtly unpredictable
     * ways.
     */
    start_brk = 0;
    brk_size = round_up(trusty_app->props.min_heap_size, PAGE_SIZE);

    /* Allocate if needed. */
    if (brk_size > 0) {
        status = vmm_alloc_no_physical(
                trusty_app->aspace, "brk_heap_res", brk_size,
                (void**)&start_brk, PAGE_SIZE_SHIFT, 0,
                ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);

        if (status != NO_ERROR) {
            dprintf(CRITICAL,
                    "failed(%d) to create heap(0x%" PRIxPTR
                    ") for app %u, %s\n",
                    status, start_brk, trusty_app->app_id,
                    trusty_app->props.app_name);
            return ERR_NO_MEMORY;
        }
    }

    /* Record the location. */
    trusty_app->used_brk = false;
    trusty_app->start_brk = start_brk;
    trusty_app->cur_brk = start_brk;
    trusty_app->end_brk = start_brk + brk_size;

    return NO_ERROR;
}

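/*
 * Sketch of the invariant init_brk() establishes, assuming the brk
 * syscall handler (outside this listing) only moves cur_brk within the
 * reservation:
 *
 *   start_brk <= cur_brk <= end_brk, where start_brk and end_brk are
 *   page aligned and end_brk - start_brk == round_up(min_heap_size,
 *   PAGE_SIZE)
 */
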
/**
 * select_load_bias() - Pick a load bias for an ELF
 * @phdr:      Pre-validated program header array base
 * @num_phdrs: Number of program headers
 * @aspace:    The address space the bias needs to be valid in
 * @out:       Out pointer to write the selected bias to. Only valid if the
 *             function returned 0.
 *
 * This function calculates an offset that can be added to every loadable ELF
 * segment in the image and still result in a legal load address.
 *
 * Return: A status code indicating whether a bias was located. If nonzero,
 *         the bias output may be invalid.
 */
static status_t select_load_bias(ELF_PHDR* phdr,
                                 size_t num_phdrs,
                                 vmm_aspace_t* aspace,
                                 vaddr_t* out) {
    DEBUG_ASSERT(out);
#if ASLR
    vaddr_t low = VADDR_MAX;
    vaddr_t high = 0;
    for (size_t i = 0; i < num_phdrs; i++, phdr++) {
        low = MIN(low, phdr->p_vaddr);
        vaddr_t candidate_high;
        if (!__builtin_add_overflow(phdr->p_vaddr, phdr->p_memsz,
                                    &candidate_high)) {
            high = MAX(high, candidate_high);
        } else {
            dprintf(CRITICAL, "Segment %zu overflows virtual address space\n",
                    i);
            return ERR_NOT_VALID;
        }
    }
    LTRACEF("ELF Segment range: %" PRIxVADDR "->%" PRIxVADDR "\n", low, high);

    DEBUG_ASSERT(high >= low);
    size_t size = round_up(high - low, PAGE_SIZE);
    LTRACEF("Spot size: %zu\n", size);

    vaddr_t spot;
    if (!vmm_find_spot(aspace, size, &spot)) {
        return ERR_NO_MEMORY;
    }
    LTRACEF("Load target: %" PRIxVADDR "\n", spot);

    /*
     * Overflow is acceptable here, since adding the delta to the lowest
     * ELF load address will still return to spot, which was the goal.
     */
    __builtin_sub_overflow(spot, low, out);
#else
    /* If ASLR is disabled, the app is not PIE, use a load bias of 0 */
    *out = 0;
#endif

    LTRACEF("Load bias: %" PRIxVADDR "\n", *out);

    return NO_ERROR;
}

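/*
 * Worked example with made-up numbers: if the PT_LOAD segments span
 * p_vaddr 0x1000..0x5250, then low = 0x1000, high = 0x5250 and
 * size = round_up(0x4250, PAGE_SIZE) = 0x5000 for 4K pages. If
 * vmm_find_spot() returns spot = 0x80002000, the bias is
 * 0x80002000 - 0x1000 = 0x80001000, so the lowest segment lands exactly
 * at the reserved spot.
 */
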
static bool elf_vaddr_mapped(struct trusty_app* trusty_app,
                             size_t vaddr,
                             ssize_t offset) {
    ELF_EHDR* elf_hdr = (ELF_EHDR*)trusty_app->app_img.img_start;
    void* trusty_app_image = (void*)trusty_app->app_img.img_start;
    ELF_PHDR* prg_hdr = (ELF_PHDR*)(trusty_app_image + elf_hdr->e_phoff);
    if (__builtin_add_overflow(vaddr, offset, &vaddr)) {
        return false;
    }
    for (size_t i = 0; i < elf_hdr->e_phnum; i++, prg_hdr++) {
        Elf_Addr end;
        __builtin_add_overflow(prg_hdr->p_vaddr, prg_hdr->p_memsz, &end);
        if (prg_hdr->p_type == PT_LOAD &&
            vaddr >= round_down(prg_hdr->p_vaddr, PAGE_SIZE) &&
            vaddr < round_up(end, PAGE_SIZE)) {
            return true;
        }
    }
    return false;
}

static status_t alloc_address_map(struct trusty_app* trusty_app) {
    ELF_EHDR* elf_hdr = (ELF_EHDR*)trusty_app->app_img.img_start;
    void* trusty_app_image;
    ELF_PHDR* prg_hdr;
    u_int i;
    status_t ret;
    trusty_app_image = (void*)trusty_app->app_img.img_start;

    prg_hdr = (ELF_PHDR*)(trusty_app_image + elf_hdr->e_phoff);

    if (!address_range_within_img(prg_hdr, sizeof(ELF_PHDR) * elf_hdr->e_phnum,
                                  &trusty_app->app_img)) {
        dprintf(CRITICAL, "ELF program headers table out of bounds\n");
        return ERR_NOT_VALID;
    }

    status_t bias_result =
            select_load_bias(prg_hdr, elf_hdr->e_phnum, trusty_app->aspace,
                             &trusty_app->load_bias);
    if (bias_result) {
        return bias_result;
    }

    size_t has_guard_low = 0;
    size_t has_guard_high = 0;

    /* create mappings for PT_LOAD sections */
    for (i = 0; i < elf_hdr->e_phnum; i++, prg_hdr++) {
        /* load_bias uses overflow to lower vaddr if needed */
        Elf_Addr p_vaddr;
        __builtin_add_overflow(prg_hdr->p_vaddr, trusty_app->load_bias,
                               &p_vaddr);

        LTRACEF("trusty_app %d, %s: ELF type 0x%x"
                ", vaddr 0x%08" PRIxELF_Addr ", paddr 0x%08" PRIxELF_Addr
                ", rsize 0x%08" PRIxELF_Size ", msize 0x%08" PRIxELF_Size
                ", flags 0x%08x\n",
                trusty_app->app_id, trusty_app->props.app_name, prg_hdr->p_type,
                p_vaddr, prg_hdr->p_paddr, prg_hdr->p_filesz, prg_hdr->p_memsz,
                prg_hdr->p_flags);

        if (prg_hdr->p_type != PT_LOAD)
            continue;

        if (p_vaddr < USER_ASPACE_BASE) {
            TRACEF("Attempted to load segment beneath user address space\n");
            return ERR_NOT_VALID;
        }

        vaddr_t vaddr = p_vaddr;
        vaddr_t img_kvaddr = (vaddr_t)(trusty_app_image + prg_hdr->p_offset);
        size_t mapping_size;

1291             dprintf(CRITICAL,
1292                     "segment %u load address 0x%" PRIxVADDR
1293                     " in not page aligned for app %u, %s\n",
1294                     i, vaddr, trusty_app->app_id, trusty_app->props.app_name);
1295             return ERR_NOT_VALID;
1296         }
1297 
1298         if (img_kvaddr & PAGE_MASK) {
1299             dprintf(CRITICAL,
1300                     "segment %u image address 0x%" PRIxVADDR
1301                     " in not page aligned for app %u, %s\n",
1302                     i, img_kvaddr, trusty_app->app_id,
1303                     trusty_app->props.app_name);
1304             return ERR_NOT_VALID;
1305         }
1306 
1307         uint vmm_flags = VMM_FLAG_VALLOC_SPECIFIC;
1308         if (elf_vaddr_mapped(trusty_app, prg_hdr->p_vaddr,
1309                              -(ssize_t)PAGE_SIZE)) {
1310             vmm_flags |= VMM_FLAG_NO_START_GUARD;
1311         } else {
1312             has_guard_low++;
1313         }
1314         if (elf_vaddr_mapped(
1315                     trusty_app,
1316                     prg_hdr->p_vaddr + round_up(prg_hdr->p_memsz, PAGE_SIZE),
1317                     0)) {
1318             vmm_flags |= VMM_FLAG_NO_END_GUARD;
1319         } else {
1320             has_guard_high++;
1321         }
1322 
1323         uint arch_mmu_flags = ARCH_MMU_FLAG_PERM_USER;
1324         if (!(prg_hdr->p_flags & PF_X)) {
1325             arch_mmu_flags += ARCH_MMU_FLAG_PERM_NO_EXECUTE;
1326         }
1327 
1328         if (prg_hdr->p_flags & PF_W) {
1329             paddr_t upaddr;
1330             void* load_kvaddr;
1331             size_t copy_size;
1332             size_t file_size;
1333             mapping_size = round_up(prg_hdr->p_memsz, PAGE_SIZE);
1334 
1335             if (!address_range_within_img((void*)img_kvaddr, prg_hdr->p_filesz,
1336                                           &trusty_app->app_img)) {
1337                 dprintf(CRITICAL, "ELF Program segment %u out of bounds\n", i);
1338                 return ERR_NOT_VALID;
1339             }
1340 
1341             ret = vmm_alloc(trusty_app->aspace, "elfseg", mapping_size,
1342                             (void**)&vaddr, PAGE_SIZE_SHIFT, vmm_flags,
1343                             arch_mmu_flags);
1344 
1345             if (ret != NO_ERROR) {
1346                 dprintf(CRITICAL,
1347                         "failed(%d) to allocate data segment(0x%" PRIxVADDR
1348                         ") %u for app %u, %s\n",
1349                         ret, vaddr, i, trusty_app->app_id,
1350                         trusty_app->props.app_name);
1351                 return ret;
1352             }
1353 
1354             ASSERT(vaddr == p_vaddr);
1355 
1356             file_size = prg_hdr->p_filesz;
1357             while (file_size > 0) {
1358                 ret = arch_mmu_query(&trusty_app->aspace->arch_aspace, vaddr,
1359                                      &upaddr, NULL);
1360                 if (ret != NO_ERROR) {
1361                     dprintf(CRITICAL, "Could not copy data segment: %d\n", ret);
1362                     return ret;
1363                 }
1364 
1365                 load_kvaddr = paddr_to_kvaddr(upaddr);
1366                 ASSERT(load_kvaddr);
1367                 copy_size = MIN(file_size, PAGE_SIZE);
1368                 memcpy(load_kvaddr, (void*)img_kvaddr, copy_size);
1369                 file_size -= copy_size;
1370                 vaddr += copy_size;
1371                 img_kvaddr += copy_size;
1372             }
1373 
1374         } else {
1375             mapping_size = round_up(prg_hdr->p_filesz, PAGE_SIZE);
1376 
1377             if (!address_range_within_img((void*)img_kvaddr, mapping_size,
1378                                           &trusty_app->app_img)) {
1379                 dprintf(CRITICAL, "ELF Program segment %u out of bounds\n", i);
1380                 return ERR_NOT_VALID;
1381             }
1382             if (mapping_size != round_up(prg_hdr->p_memsz, PAGE_SIZE)) {
1383                 dprintf(CRITICAL, "ELF Program segment %u bad memsz\n", i);
1384                 return ERR_NOT_VALID;
1385             }
1386 
1387             paddr_t* paddr_arr =
1388                     calloc(mapping_size / PAGE_SIZE, sizeof(paddr_t));
1389             if (!paddr_arr) {
1390                 dprintf(CRITICAL,
1391                         "Failed to allocate physical address array\n");
1392                 return ERR_NO_MEMORY;
1393             }
1394 
1395             for (size_t j = 0; j < mapping_size / PAGE_SIZE; j++) {
1396                 paddr_arr[j] =
1397                         vaddr_to_paddr((void*)(img_kvaddr + PAGE_SIZE * j));
1398                 DEBUG_ASSERT(paddr_arr[j] && !(paddr_arr[j] & PAGE_MASK));
1399             }
1400 
1401             arch_mmu_flags |= ARCH_MMU_FLAG_PERM_RO;
1402             ret = vmm_alloc_physical_etc(
1403                     trusty_app->aspace, "elfseg", mapping_size, (void**)&vaddr,
1404                     PAGE_SIZE_SHIFT, paddr_arr, mapping_size / PAGE_SIZE,
1405                     vmm_flags, arch_mmu_flags);
1406             if (ret != NO_ERROR) {
1407                 dprintf(CRITICAL,
1408                         "failed(%d) to map RO segment(0x%" PRIxVADDR
1409                         ") %u for app %u, %s\n",
1410                         ret, vaddr, i, trusty_app->app_id,
1411                         trusty_app->props.app_name);
1412                 free(paddr_arr);
1413                 return ret;
1414             }
1415 
1416             ASSERT(vaddr == p_vaddr);
1417             free(paddr_arr);
1418         }
1419 
1420         LTRACEF("trusty_app %d, %s: load vaddr 0x%08" PRIxVADDR
1421                 ", paddr 0x%08" PRIxVADDR
1422                 ", rsize 0x%08zx, msize 0x%08" PRIxELF_Size
1423                 ", access r%c%c, flags 0x%x\n",
1424                 trusty_app->app_id, trusty_app->props.app_name, vaddr,
1425                 vaddr_to_paddr((void*)vaddr), mapping_size, prg_hdr->p_memsz,
1426                 arch_mmu_flags & ARCH_MMU_FLAG_PERM_RO ? '-' : 'w',
1427                 arch_mmu_flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE ? '-' : 'x',
1428                 arch_mmu_flags);
1429     }
1430 
1431     ASSERT(has_guard_low);
1432     ASSERT(has_guard_high);
1433     ASSERT(has_guard_low == has_guard_high);
1434 
1435     ret = init_brk(trusty_app);
1436     if (ret != NO_ERROR) {
1437         dprintf(CRITICAL,
1438                 "failed to load trusty_app: trusty_app heap creation error\n");
1439         return ret;
1440     }
1441 
1442     dprintf(SPEW,
1443             "trusty_app %d, %s: brk:  start 0x%08" PRIxPTR " end 0x%08" PRIxPTR
1444             "\n",
1445             trusty_app->app_id, trusty_app->props.app_name,
1446             trusty_app->start_brk, trusty_app->end_brk);
1447     dprintf(SPEW, "trusty_app %d, %s: entry 0x%08" PRIxELF_Addr "\n",
1448             trusty_app->app_id, trusty_app->props.app_name, elf_hdr->e_entry);
1449 
1450     return NO_ERROR;
1451 }
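/*
 * Editor's sketch (illustration only, not part of this file): the PT_LOAD
 * loop above derives MMU permissions from the ELF segment flags. The same
 * mapping, restated as a standalone helper using the flag names above:
 */
#if 0
static uint elf_to_mmu_flags(Elf_Word p_flags) {
    uint mmu_flags = ARCH_MMU_FLAG_PERM_USER;       /* user-accessible */
    if (!(p_flags & PF_X))
        mmu_flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE; /* no execute unless PF_X */
    if (!(p_flags & PF_W))
        mmu_flags |= ARCH_MMU_FLAG_PERM_RO;         /* read-only unless PF_W */
    return mmu_flags;
}
#endif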
1452 
1453 static bool has_waiting_connection(struct trusty_app* app) {
1454     struct manifest_port_entry* entry;
1455 
1456     /*
1457      * Don't hold the apps lock when calling into other subsystems with calls
1458      * that may grab additional locks.
1459      */
1460     DEBUG_ASSERT(!is_mutex_held(&apps_lock));
1461 
1462     list_for_every_entry(&app->props.port_entry_list, entry,
1463                          struct manifest_port_entry, node) {
1464         if (ipc_connection_waiting_for_port(entry->path, entry->flags)) {
1465             return true;
1466         }
1467     }
1468 
1469     return false;
1470 }
1471 
1472 static void kill_waiting_connections(struct trusty_app* app) {
1473     struct manifest_port_entry* entry;
1474 
1475     /*
1476      * Don't hold the apps lock when calling into other subsystems with calls
1477      * that may grab additional locks.
1478      */
1479     DEBUG_ASSERT(!is_mutex_held(&apps_lock));
1480 
1481     list_for_every_entry(&app->props.port_entry_list, entry,
1482                          struct manifest_port_entry, node) {
1483         ipc_remove_connection_waiting_for_port(entry->path, entry->flags);
1484     }
1485 }
1486 
1487 /* Must be called with the apps_lock held */
1488 static status_t request_app_start_locked(struct trusty_app* app) {
1489     DEBUG_ASSERT(is_mutex_held(&apps_lock));
1490 
1491     switch (app->state) {
1492     case APP_NOT_RUNNING:
1493         app->state = APP_STARTING;
1494         event_signal(&app_mgr_event, false);
1495         return NO_ERROR;
1496     case APP_STARTING:
1497     case APP_RUNNING:
1498     case APP_RESTARTING:
1499         return ERR_ALREADY_STARTED;
1500     case APP_TERMINATING:
1501         /*
1502          * We got a new connection while terminating, change the state so
1503          * app_mgr_handle_terminating can restart the app.
1504          */
1505         app->state = APP_RESTARTING;
1506         return ERR_ALREADY_STARTED;
1507     case APP_FAILED_TO_START:
1508         /* The app failed to start so it shouldn't accept new connections. */
1509         return ERR_CANCELLED;
1510         /*
1511          * There is no default case here because we want the compiler to warn us
1512          * if we forget a state (controlled by the -Wswitch option which is
1513          * included in -Wall). Whenever someone adds a new state without
1514          * handling it here, they should get a compiler error.
1515          */
1516     }
1517 }
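/*
 * Editor's note: the lifecycle implied by request_app_start_locked and the
 * app_mgr handlers below, summarized:
 *
 *   APP_NOT_RUNNING --request--> APP_STARTING --ok--> APP_RUNNING
 *   APP_STARTING --error--> APP_FAILED_TO_START (terminal)
 *   APP_RUNNING --exit--> APP_TERMINATING --new connection--> APP_RESTARTING
 *   APP_TERMINATING / APP_RESTARTING --app_mgr--> APP_STARTING or
 *                                                 APP_NOT_RUNNING
 */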
1518 
1519 /*
1520  * Create a trusty_app from its memory image and add it to the global list of
1521  * apps. Returns the created app in out_trusty_app if not NULL.
1522  */
1523 static status_t trusty_app_create(struct trusty_app_img* app_img,
1524                                   struct trusty_app** out_trusty_app,
1525                                   uint32_t flags) {
1526     ELF_EHDR* ehdr;
1527     struct trusty_app* trusty_app;
1528     status_t ret;
1529     struct manifest_port_entry* port_entry;
1530     struct manifest_port_entry* tmp_port_entry;
1531     struct manifest_mmio_entry* mmio_entry;
1532     struct manifest_mmio_entry* tmp_mmio_entry;
1533 
1534     DEBUG_ASSERT(!(flags & ~(uint32_t)APP_FLAGS_CREATION_MASK));
1535 
1536     if (app_img->img_start & PAGE_MASK || app_img->img_end & PAGE_MASK) {
1537         dprintf(CRITICAL,
1538                 "app image is not page aligned start 0x%" PRIxPTR
1539                 " end 0x%" PRIxPTR "\n",
1540                 app_img->img_start, app_img->img_end);
1541         return ERR_NOT_VALID;
1542     }
1543 
1544     dprintf(SPEW, "trusty_app: start %p size 0x%08" PRIxPTR " end %p\n",
1545             (void*)app_img->img_start, app_img->img_end - app_img->img_start,
1546             (void*)app_img->img_end);
1547 
1548     trusty_app = (struct trusty_app*)calloc(1, sizeof(struct trusty_app));
1549     if (!trusty_app) {
1550         dprintf(CRITICAL,
1551                 "trusty_app: failed to allocate memory for trusty app\n");
1552         return ERR_NO_MEMORY;
1553     }
1554     list_initialize(&trusty_app->props.port_entry_list);
1555     list_initialize(&trusty_app->props.mmio_entry_list);
1556     list_initialize(&trusty_app->props.dma_entry_list);
1557 
1558     ehdr = (ELF_EHDR*)app_img->img_start;
1559     if (!address_range_within_img(ehdr, sizeof(ELF_EHDR), app_img)) {
1560         dprintf(CRITICAL, "trusty_app_create: ELF header out of bounds\n");
1561         ret = ERR_NOT_VALID;
1562         goto err_hdr;
1563     }
1564 
1565     if (strncmp((char*)ehdr->e_ident, ELFMAG, SELFMAG)) {
1566         dprintf(CRITICAL, "trusty_app_create: ELF header not found\n");
1567         ret = ERR_NOT_VALID;
1568         goto err_hdr;
1569     }
1570 
1571     trusty_app->app_id = trusty_next_app_id++;
1572     trusty_app->app_img = *app_img;
1573     trusty_app->state = APP_NOT_RUNNING;
1574     trusty_app->flags |= flags;
1575 
1576     mutex_acquire(&apps_lock);
1577 
1578     ret = load_app_config_options(trusty_app);
1579     if (ret == NO_ERROR) {
1580         list_add_tail(&trusty_app_list, &trusty_app->node);
1581     }
1582 
1583     mutex_release(&apps_lock);
1584 
1585     if (ret == NO_ERROR) {
1586         if (out_trusty_app) {
1587             /*
1588              * TODO: this returns an app pointer without holding the lock; the
1589              * app might get unloaded while the caller holds this pointer, so
1590              * we need to handle this case correctly
1591              */
1592             *out_trusty_app = trusty_app;
1593         }
1594 
1595         return ret;
1596     }
1597 
1598     dprintf(CRITICAL, "manifest processing failed(%d)\n", ret);
1599 
1600 err_load:
1601     list_for_every_entry_safe(&trusty_app->props.port_entry_list, port_entry,
1602                               tmp_port_entry, struct manifest_port_entry,
1603                               node) {
1604         list_delete(&port_entry->node);
1605         free(port_entry);
1606     }
1607     list_for_every_entry_safe(&trusty_app->props.mmio_entry_list, mmio_entry,
1608                               tmp_mmio_entry, struct manifest_mmio_entry,
1609                               node) {
1610         list_delete(&mmio_entry->node);
1611         vmm_obj_del_ref(&mmio_entry->phys_mem_obj.vmm_obj,
1612                         &mmio_entry->phys_mem_obj_self_ref);
1613     }
1614 err_hdr:
1615     free(trusty_app);
1616     return ret;
1617 }
1618 
1619 status_t trusty_app_create_and_start(struct trusty_app_img* app_img,
1620                                      uint32_t flags) {
1621     status_t ret;
1622     struct trusty_app* trusty_app;
1623 
1624     ret = trusty_app_create(app_img, &trusty_app, flags);
1625     if (ret != NO_ERROR) {
1626         return ret;
1627     }
1628 
1629     /* Loadable apps with deferred_start might have clients waiting for them */
1630     if (!(trusty_app->props.mgmt_flags &
1631           APP_MANIFEST_MGMT_FLAGS_DEFERRED_START) ||
1632         has_waiting_connection(trusty_app)) {
1633         mutex_acquire(&apps_lock);
1634         ret = request_app_start_locked(trusty_app);
1635         mutex_release(&apps_lock);
1636 
1637         /*
1638          * Since we drop apps_lock between trusty_app_create and here,
1639          * it is possible for another thread to race us and start the
1640          * app from trusty_app_request_start_by_port before we
1641          * reacquire the lock. In that case, request_app_start_locked
1642          * returns ERR_ALREADY_STARTED here. We treat this case as a
1643          * success and return NO_ERROR since the application is
1644          * running and we don't want the kernel service to
1645          * free its memory.
1646          */
1647         if (ret == ERR_ALREADY_STARTED) {
1648             ret = NO_ERROR;
1649         }
1650     }
1651 
1652     return ret;
1653 }
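/*
 * Editor's sketch (illustration only): how a loader might hand a page-aligned
 * image to trusty_app_create_and_start. The base/size parameters here are
 * hypothetical; the fields shown are the ones this file validates and
 * consumes.
 */
#if 0
static status_t load_one_app(uintptr_t img_base, size_t img_size,
                             uintptr_t man_base, size_t man_size) {
    struct trusty_app_img app_img = {
            .img_start = img_base,          /* must be page aligned */
            .img_end = img_base + img_size, /* must be page aligned */
            .manifest_start = man_base,
            .manifest_end = man_base + man_size,
    };
    return trusty_app_create_and_start(&app_img, 0 /* no creation flags */);
}
#endif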
1654 
1655 status_t trusty_app_setup_mmio(struct trusty_app* trusty_app,
1656                                uint32_t mmio_id,
1657                                user_addr_t* uaddr_p,
1658                                uint32_t map_size) {
1659     status_t ret;
1660     struct manifest_mmio_entry* mmio_entry;
1661 
1662     /* Should only be called on the currently running app */
1663     DEBUG_ASSERT(trusty_app == current_trusty_app());
1664 
1665     ASSERT(uaddr_p);
1666     void* va = (void*)(uintptr_t)(*uaddr_p);
1667 
1668     list_for_every_entry(&trusty_app->props.mmio_entry_list, mmio_entry,
1669                          struct manifest_mmio_entry, node) {
1670         char name[32];
1671 
1672         if (mmio_entry->id != mmio_id) {
1673             continue;
1674         }
1675 
1676         map_size = round_up(map_size, PAGE_SIZE);
1677         snprintf(name, sizeof(name), "mmio-%" PRIu32, mmio_id);
1678 
1679         ret = vmm_alloc_obj(
1680                 trusty_app->aspace, name, &mmio_entry->phys_mem_obj.vmm_obj, 0,
1681                 map_size, &va, 0, 0,
1682                 ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
1683         if (ret == NO_ERROR) {
1684             *uaddr_p = (user_addr_t)(uintptr_t)va;
1685             DEBUG_ASSERT((void*)(uintptr_t)(*uaddr_p) == va);
1686         }
1687         return ret;
1688     }
1689 
1690     return ERR_NOT_FOUND;
1691 }
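/*
 * Editor's sketch (illustration only): mapping a manifest-declared mmio
 * region for the calling app, e.g. from a syscall handler. MMIO_ID_EXAMPLE is
 * hypothetical; passing 0 as the address leaves placement to the vmm, since
 * trusty_app_setup_mmio does not request a specific address.
 */
#if 0
static status_t map_example_mmio(user_addr_t* uaddr_out) {
    user_addr_t uaddr = 0;
    status_t ret = trusty_app_setup_mmio(current_trusty_app(),
                                         MMIO_ID_EXAMPLE, &uaddr, PAGE_SIZE);
    if (ret == NO_ERROR)
        *uaddr_out = uaddr;
    return ret;
}
#endif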
1692 
1693 static status_t trusty_app_start(struct trusty_app* trusty_app) {
1694     char name[32];
1695     struct trusty_thread* trusty_thread;
1696     struct trusty_app_notifier* n;
1697     ELF_EHDR* elf_hdr;
1698     uint flags = 0;
1699     int ret;
1700 
1701     DEBUG_ASSERT(trusty_app->state == APP_STARTING);
1702 
1703     snprintf(name, sizeof(name), "trusty_app_%d_%08x-%04x-%04x",
1704              trusty_app->app_id, trusty_app->props.uuid.time_low,
1705              trusty_app->props.uuid.time_mid,
1706              trusty_app->props.uuid.time_hi_and_version);
1707 
1708 #ifndef USER_BTI_DISABLED
1709     if (trusty_app->props.feature_bti && arch_bti_supported()) {
1710         flags |= VMM_ASPACE_FLAG_BTI;
1711     }
1712 #endif
1713 
1714     ret = vmm_create_aspace_with_quota(&trusty_app->aspace, name,
1715                                        trusty_app->props.min_heap_size, flags);
1716     if (ret != NO_ERROR) {
1717         dprintf(CRITICAL, "Failed(%d) to allocate address space for %s\n", ret,
1718                 name);
1719         goto err_aspace;
1720     }
1721 
1722     ret = alloc_address_map(trusty_app);
1723     if (ret != NO_ERROR) {
1724         dprintf(CRITICAL, "failed(%d) to load address map for %s\n", ret, name);
1725         goto err_map;
1726     }
1727 
1728     /* allocate the app-local storage (als) slot array */
1729     trusty_app->als = calloc(1, als_slot_cnt * sizeof(void*));
1730     if (!trusty_app->als) {
1731         dprintf(CRITICAL, "failed to allocate local storage for %s\n", name);
1732         ret = ERR_NO_MEMORY;
1733         /* alloc_address_map gets cleaned up by destroying the address space */
1734         goto err_alloc;
1735     }
1736 
1737     /* call all registered startup notifiers */
1738     list_for_every_entry(&app_notifier_list, n, struct trusty_app_notifier,
1739                          node) {
1740         if (!n->startup)
1741             continue;
1742 
1743         ret = n->startup(trusty_app);
1744         if (ret != NO_ERROR) {
1745             dprintf(CRITICAL, "failed(%d) to invoke startup notifier for %s\n",
1746                     ret, name);
1747             goto err_notifier;
1748         }
1749     }
1750 
1751     elf_hdr = (ELF_EHDR*)trusty_app->app_img.img_start;
1752     vaddr_t entry;
1753     __builtin_add_overflow(elf_hdr->e_entry, trusty_app->load_bias, &entry);
1754     trusty_thread = trusty_thread_create(
1755             name, entry, trusty_app->props.priority,
1756             trusty_app->props.min_stack_size,
1757             trusty_app->props.min_shadow_stack_size, trusty_app);
1758 
1759     if (!trusty_thread) {
1760         dprintf(CRITICAL, "failed to allocate trusty thread for %s\n", name);
1761         ret = ERR_NO_MEMORY;
1762         goto err_thread;
1763     }
1764 
1765     trusty_app->thread = trusty_thread;
1766 
1767     trusty_app->state = APP_RUNNING;
1768     ret = trusty_thread_start(trusty_app->thread);
1769 
1770     ASSERT(ret == NO_ERROR);
1771 
1772     return ret;
1773 
1774 err_thread:
1775 err_notifier:
1776     /* n points to failed notifier, or NULL if all were called successfully */
1777     if (n != NULL) {
1778         n = list_prev_type(&app_notifier_list, &n->node,
1779                            struct trusty_app_notifier, node);
1780     } else {
1781         n = list_peek_tail_type(&app_notifier_list, struct trusty_app_notifier,
1782                                 node);
1783     }
1784 
1785     while (n != NULL) {
1786         /* Guard the call rather than using continue here: n must still
1787          * advance below, or a notifier without a shutdown hook would
1788          * spin this loop forever. */
1789         if (n->shutdown && n->shutdown(trusty_app) != NO_ERROR)
1790             panic("failed to invoke shutdown notifier for %s\n", name);
1791 
1792         n = list_prev_type(&app_notifier_list, &n->node,
1793                            struct trusty_app_notifier, node);
1794     }
1795 
1796     free(trusty_app->als);
1797     trusty_app->als = NULL;
1798 err_alloc:
1799 err_map:
1800     vmm_free_aspace(trusty_app->aspace);
1801     trusty_app->aspace = NULL;
1802 err_aspace:
1803     return ret;
1804 }
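/*
 * Editor's sketch (illustration only): the startup/shutdown hooks invoked by
 * trusty_app_start and trusty_app_exit_etc are registered through a
 * trusty_app_notifier. The trusty_register_app_notifier() name and the init
 * level are assumptions based on the companion header, not verified here.
 */
#if 0
static int my_startup(struct trusty_app* app) { return NO_ERROR; }
static int my_shutdown(struct trusty_app* app) { return NO_ERROR; }

static struct trusty_app_notifier my_notifier = {
        .startup = my_startup,
        .shutdown = my_shutdown,
};

static void my_notifier_init(uint level) {
    /* must run before finalize_registration() in trusty_app_init */
    trusty_register_app_notifier(&my_notifier);
}
LK_INIT_HOOK(my_notifier, my_notifier_init, LK_INIT_LEVEL_APPS - 1);
#endif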
1805 
1806 static void __NO_RETURN trusty_app_exit_etc(int status,
1807                                             uint32_t crash_reason,
1808                                             bool is_crash,
1809                                             uint64_t far,
1810                                             uint64_t elr) {
1811     status_t ret;
1812     struct trusty_app* app;
1813     struct trusty_app_notifier* notifier;
1814     lk_time_ns_t restart_timeout;
1815 
1816     app = current_trusty_app();
1817 
1818     DEBUG_ASSERT(app->state == APP_RUNNING);
1819 
1820     LTRACEF("exiting app %u, %s...\n", app->app_id, app->props.app_name);
1821 
1822     if (status) {
1823         TRACEF("%s: exited with exit code %d\n", app->aspace->name, status);
1824         if (!(app->props.mgmt_flags &
1825               APP_MANIFEST_MGMT_FLAGS_NON_CRITICAL_APP)) {
1826             panic("Unclean exit from critical app\n");
1827         }
1828         dump_backtrace();
1829         dprintf(ALWAYS, "%s\n", lk_version);
1830         restart_timeout = TRUSTY_APP_RESTART_TIMEOUT_FAILURE;
1831     } else {
1832         restart_timeout = TRUSTY_APP_RESTART_TIMEOUT_SUCCESS;
1833     }
1834     app->min_start_time = current_time_ns() + restart_timeout;
1835 
1836     list_for_every_entry(&app_notifier_list, notifier,
1837                          struct trusty_app_notifier, node) {
1838         if (!notifier->shutdown)
1839             continue;
1840 
1841         ret = notifier->shutdown(app);
1842         if (ret != NO_ERROR)
1843             panic("shutdown notifier failed(%d) for app %u, %s\n", ret,
1844                   app->app_id, app->props.app_name);
1845     }
1846     /* Do not report normal exits with exit code 0 */
1847     if (is_crash || crash_reason != 0) {
1848         /* Always request obfuscation here; notifiers decide later whether to report the true values */
1849         const struct trusty_error_args error_args = {
1850             .reason = crash_reason,
1851             .is_crash = is_crash,
1852             .far = far,
1853             .elr = elr,
1854             .is_hash = true,
1855         };
1856 
1857         list_for_every_entry(&app_notifier_list, notifier,
1858                              struct trusty_app_notifier, node) {
1859             if (!notifier->crash) {
1860                 continue;
1861             }
1862 
1863             ret = notifier->crash(app, &error_args);
1864             if (ret != NO_ERROR) {
1865                 panic("crash notifier failed(%d) for app %u, %s\n", ret,
1866                       app->app_id, app->props.app_name);
1867             }
1868         }
1869     }
1870 
1871     free(app->als);
1872     app->als = NULL;
1873     mutex_acquire(&apps_lock);
1874     app->state = APP_TERMINATING;
1875     mutex_release(&apps_lock);
1876 
1877     event_signal(&app_mgr_event, false);
1878     trusty_thread_exit(status);
1879 }
1880 
1881 void trusty_app_exit(int status) {
1882     /* Forward a non-zero exit status as the error reason so notifiers still report it */
1883     trusty_app_exit_etc(status, (uint32_t)status, false, 0, 0);
1884 }
1885 
1886 void trusty_app_crash(uint32_t reason, uint64_t far, uint64_t elr) {
1887     trusty_app_exit_etc(1 /*EXIT_FAILURE*/, reason, true, far, elr);
1888 }
1889 
1890 static status_t app_mgr_handle_starting(struct trusty_app* app) {
1891     status_t ret;
1892 
1893     DEBUG_ASSERT(is_mutex_held(&apps_lock));
1894     DEBUG_ASSERT(app->state == APP_STARTING);
1895 
1896     LTRACEF("starting app %u, %s\n", app->app_id, app->props.app_name);
1897 
1898     ret = trusty_app_start(app);
1899 
1900     if (ret != NO_ERROR) {
1901         /*
1902          * Drop the lock to call into ipc to kill waiting connections.
1903          * We put the app in the APP_FAILED_TO_START state so no new
1904          * connections are accepted and also to prevent it from being removed.
1905          */
1906         app->state = APP_FAILED_TO_START;
1907 
1908         mutex_release(&apps_lock);
1909         kill_waiting_connections(app);
1910         mutex_acquire(&apps_lock);
1911 
1912         DEBUG_ASSERT(app->state == APP_FAILED_TO_START);
1913     }
1914     return ret;
1915 }
1916 
1917 static status_t app_mgr_handle_terminating(struct trusty_app* app) {
1918     status_t ret;
1919     int retcode;
1920     bool restart_app;
1921 
1922     DEBUG_ASSERT(is_mutex_held(&apps_lock));
1923     DEBUG_ASSERT(app->state == APP_TERMINATING || app->state == APP_RESTARTING);
1924 
1925     LTRACEF("waiting for app %u, %s to exit\n", app->app_id,
1926             app->props.app_name);
1927 
1928     ret = thread_join(app->thread->thread, &retcode, INFINITE_TIME);
1929     ASSERT(ret == NO_ERROR);
1930     free(app->thread);
1931     app->thread = NULL;
1932     ret = vmm_free_aspace(app->aspace);
1933     app->aspace = NULL;
1934 
1935     /*
1936      * Panic if app exited with dma active. An unclean exit from a critical app
1937      * will already have panic'ed the kernel so this check will only detect when
1938      * critical apps exit cleanly with dma active and when non-critical apps
1939      * exit for any reason with dma active.
1940      */
1941     if (!list_is_empty(&app->props.dma_entry_list)) {
1942         mutex_release(&apps_lock);
1943         panic("%s: exited(%d) with dma active\n", app->props.app_name, retcode);
1944     }
1945 
1946     if (app->props.mgmt_flags & APP_MANIFEST_MGMT_FLAGS_RESTART_ON_EXIT) {
1947         restart_app = true;
1948     } else if (app->state == APP_TERMINATING) {
1949         /*
1950          * Drop the lock to call into ipc to check for connections. This is safe
1951          * since the app is in the APP_TERMINATING state so it cannot be
1952          * removed. We don't need to do this in APP_RESTARTING since that state
1953          * already marks that a connection is pending. If the app is marked
1954          * restart-on-exit, then we also go ahead with the restart.
1955          */
1956         mutex_release(&apps_lock);
1957         restart_app = has_waiting_connection(app);
1958         /*
1959          * We might get a new connection after has_waiting_connection returns
1960          * false. In that case, request_app_start_locked should change the state
1961          * to APP_RESTARTING
1962          */
1963         mutex_acquire(&apps_lock);
1964     } else {
1965         restart_app = false;
1966     }
1967 
1968     DEBUG_ASSERT(app->state == APP_TERMINATING || app->state == APP_RESTARTING);
1969     if (app->state == APP_RESTARTING) {
1970         restart_app = true;
1971     }
1972 
1973     if (restart_app) {
1974         app->state = APP_STARTING;
1975         event_signal(&app_mgr_event, false);
1976     } else {
1977         app->state = APP_NOT_RUNNING;
1978     }
1979 
1980     return ret;
1981 }
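/*
 * Editor's note: an exited app is restarted when any of the following hold:
 * APP_MANIFEST_MGMT_FLAGS_RESTART_ON_EXIT is set, a client is already waiting
 * on one of its ports, or a connection arrived during teardown and moved the
 * state to APP_RESTARTING. Otherwise it parks in APP_NOT_RUNNING until
 * trusty_app_request_start_by_port starts it again.
 */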
1982 
1983 static int app_mgr(void* arg) {
1984     status_t ret;
1985     struct trusty_app* app;
1986     lk_time_ns_t min_start_time = UINT64_MAX;
1987     lk_time_ns_t now = 0;
1988 
1989     while (true) {
1990         lk_time_t timeout_ms = 0;
1991 
1992         if (min_start_time == UINT64_MAX) {
1993             timeout_ms = INFINITE_TIME;
1994         } else {
1995             now = current_time_ns();
1996             if (min_start_time > now) {
1997                 timeout_ms = NS2MS_CEIL(min_start_time - now);
1998             }
1999         }
2000 
2001         LTRACEF("app manager waiting for events with timeout_ms=%d\n",
2002                 (int32_t)timeout_ms);
2003 
2004         event_wait_timeout(&app_mgr_event, timeout_ms);
2005 
2006         mutex_acquire(&apps_lock);
2007 
2008         now = current_time_ns();
2009         min_start_time = UINT64_MAX;
2010 
2011         list_for_every_entry(&trusty_app_list, app, struct trusty_app, node) {
2012             switch (app->state) {
2013             case APP_TERMINATING:
2014             case APP_RESTARTING:
2015                 ret = app_mgr_handle_terminating(app);
2016                 if (ret != NO_ERROR)
2017                     panic("failed(%d) to terminate app %u, %s\n", ret,
2018                           app->app_id, app->props.app_name);
2019                 break;
2020             case APP_NOT_RUNNING:
2021                 break;
2022             case APP_STARTING:
2023                 if (now >= app->min_start_time) {
2024                     ret = app_mgr_handle_starting(app);
2025                     if (ret != NO_ERROR) {
2026                         if (!(app->props.mgmt_flags &
2027                               APP_MANIFEST_MGMT_FLAGS_NON_CRITICAL_APP)) {
2028                             panic("failed(%d) to start app %u, %s\n", ret,
2029                                   app->app_id, app->props.app_name);
2030                         }
2031                         TRACEF("failed(%d) to start app %u, %s\n", ret,
2032                                app->app_id, app->props.app_name);
2033                     }
2034                 } else if (app->min_start_time < min_start_time) {
2035                     min_start_time = app->min_start_time;
2036                 }
2037                 break;
2038             case APP_RUNNING:
2039                 break;
2040             case APP_FAILED_TO_START:
2041                 break;
2042             default:
2043                 panic("unknown state %u for app %u, %s\n", app->state,
2044                       app->app_id, app->props.app_name);
2045             }
2046         }
2047 
2048         mutex_release(&apps_lock);
2049     }
2050 }
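/*
 * Editor's note: the wait in the loop above is driven by the earliest pending
 * min_start_time, and NS2MS_CEIL rounds the remaining delta up so the manager
 * never wakes before a throttled app may start again; e.g. a remaining delay
 * of 1,000,001 ns becomes a 2 ms timeout rather than 1 ms.
 */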
2051 
2052 static void app_mgr_init(void) {
2053     status_t err;
2054     thread_t* app_mgr_thread;
2055 
2056     LTRACEF("Creating app manager thread\n");
2057     app_mgr_thread = thread_create("app manager", &app_mgr, NULL,
2058                                    DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
2059 
2060     if (!app_mgr_thread)
2061         panic("Failed to create app manager thread\n");
2062 
2063     err = thread_resume(app_mgr_thread);
2064     if (err != NO_ERROR)
2065         panic("Failed to start app manager thread\n");
2066 }
2067 
2068 bool trusty_app_is_startup_port(const char* port_path) {
2069     struct manifest_port_entry* entry;
2070 
2071     mutex_acquire(&apps_lock);
2072     entry = find_manifest_port_entry_locked(port_path, NULL);
2073     mutex_release(&apps_lock);
2074 
2075     return entry != NULL;
2076 }
2077 
2078 status_t trusty_app_request_start_by_port(const char* port_path,
2079                                           const uuid_t* uuid) {
2080     struct manifest_port_entry* entry;
2081     struct trusty_app* owner = NULL;
2082     status_t ret;
2083 
2084     mutex_acquire(&apps_lock);
2085 
2086     entry = find_manifest_port_entry_locked(port_path, &owner);
2087 
2088     if (!owner || ipc_port_check_access(entry->flags, uuid) != NO_ERROR) {
2089         ret = ERR_NOT_FOUND;
2090     } else {
2091         ret = request_app_start_locked(owner);
2092     }
2093 
2094     mutex_release(&apps_lock);
2095 
2096     return ret;
2097 }
2098 
2099 /**
2100  * prel_to_abs_ptr() - Convert a position-relative value to an absolute pointer.
2101  * @ptr: Pointer to a pointer-sized position-relative value.
2102  * @result: Pointer to the location for the result.
2103  *
2104  * Return: %true in case of success, %false for overflow.
2105  */
2106 static inline bool prel_to_abs_ptr(const intptr_t* ptr, uintptr_t* result) {
2107     return !__builtin_add_overflow((uintptr_t)ptr, *ptr, result);
2108 }
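/*
 * Editor's sketch (illustration only): a position-relative field stores
 * target - &field, so adding the field's own address back recovers the
 * absolute pointer; e.g. a field at 0x1000 referring to 0x1400 stores 0x400.
 */
#if 0
static void prel_demo(void) {
    static char target;
    static intptr_t rel;
    uintptr_t abs;

    /* encode: store the target relative to the field's own address */
    rel = (intptr_t)((uintptr_t)&target - (uintptr_t)&rel);

    /* decode: prel_to_abs_ptr adds &rel back and checks for overflow */
    if (prel_to_abs_ptr(&rel, &abs))
        ASSERT(abs == (uintptr_t)&target);
}
#endif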
2109 
2110 void trusty_app_init(void) {
2111     struct trusty_builtin_app_img* builtin_app_img;
2112 
2113     finalize_registration();
2114 
2115     app_mgr_init();
2116 
2117     for (builtin_app_img = __trusty_app_list_start;
2118          builtin_app_img != __trusty_app_list_end; builtin_app_img++) {
2119         struct trusty_app_img app_img;
2120         if (!prel_to_abs_ptr(&builtin_app_img->manifest_start,
2121                              &app_img.manifest_start) ||
2122             !prel_to_abs_ptr(&builtin_app_img->manifest_end,
2123                              &app_img.manifest_end) ||
2124             !prel_to_abs_ptr(&builtin_app_img->img_start, &app_img.img_start) ||
2125             !prel_to_abs_ptr(&builtin_app_img->img_end, &app_img.img_end)) {
2126             panic("Invalid builtin function entry\n");
2127         }
2128 
2129         if (trusty_app_create(&app_img, NULL, 0) != NO_ERROR)
2130             panic("Failed to create builtin apps\n");
2131     }
2132 }
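/*
 * Editor's note: __trusty_app_list_start/__trusty_app_list_end are expected
 * to delimit a linker-script section of trusty_builtin_app_img entries, one
 * per built-in app, with each field stored position-relative so the table
 * itself is load-address independent; prel_to_abs_ptr above rehydrates them.
 */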
2133 
2134 /* Should we export trusty_app_list instead? */
2135 void trusty_app_forall(void (*fn)(struct trusty_app* ta, void* data),
2136                        void* data) {
2137     struct trusty_app* ta;
2138 
2139     if (fn == NULL)
2140         return;
2141 
2142     mutex_acquire(&apps_lock);
2143     list_for_every_entry(&trusty_app_list, ta, struct trusty_app, node)
2144             fn(ta, data);
2145     mutex_release(&apps_lock);
2146 }
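/*
 * Editor's sketch (illustration only): counting apps via trusty_app_forall.
 * The callback runs with apps_lock held, so it must not call anything that
 * re-acquires that lock.
 */
#if 0
static void count_cb(struct trusty_app* ta, void* data) {
    (*(size_t*)data)++;
}

static size_t count_trusty_apps(void) {
    size_t count = 0;
    trusty_app_forall(count_cb, &count);
    return count;
}
#endif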
2147 
2148 static void start_apps(uint level) {
2149     struct trusty_app* trusty_app;
2150 
2151     mutex_acquire(&apps_lock);
2152     list_for_every_entry(&trusty_app_list, trusty_app, struct trusty_app,
2153                          node) {
2154         if (trusty_app->props.mgmt_flags &
2155             APP_MANIFEST_MGMT_FLAGS_DEFERRED_START)
2156             continue;
2157 
2158         request_app_start_locked(trusty_app);
2159     }
2160     mutex_release(&apps_lock);
2161 }
2162 
2163 LK_INIT_HOOK(libtrusty_apps, start_apps, LK_INIT_LEVEL_APPS + 1);
2164