xref: /aosp_15_r20/trusty/kernel/app/userscstest/userscstest.c (revision 344aa361028b423587d4ef3fa52a23d194628137)
/*
 * Copyright (c) 2021, Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <err.h>
#include <kernel/thread.h>
#include <kernel/vm.h>
#include <lib/mmutest/mmutest.h>
#include <lib/trusty/trusty_app.h>
#include <lib/unittest/unittest.h>
#include <string.h>

#if USER_SCS_ENABLED
#define FEATURE_GATED_TEST_NAME(name) name

/**
 * translate_uspace_ptr() - Translate a userspace shadow stack pointer into
 *                          the kernel's address space
 * @uspace:         The address space of a trusty app
 * @uspace_ptr:     Pointer into the above address space.
 * @kspace_ptr_out: Pointer translated into the kernel's address space. The
 *                  value of this output parameter should not be consumed
 *                  if this function returned a negative error code.
 *
 * Attempt to translate a userspace pointer into its kernel equivalent. We
 * cannot simply dereference a userspace pointer because the TLB isn't
 * loaded with the correct page table. Instead, we translate the pointer
 * to physical memory and then into the kernel virtual address space.
 * Translation can fail if the userspace address is not backed by physical
 * memory; this is the case for guard pages.
 *
 * Return:
 * * NO_ERROR         - if translation succeeded
 * * ERR_OUT_OF_RANGE - if the physical address could not be mapped into the
 *                      kernel's address space
 * * ERR_NOT_FOUND    - if the pointer is not backed by physical memory
 * * ERR_INVALID_ARGS - if kspace_ptr_out is NULL
 */
static int translate_uspace_ptr(struct vmm_aspace* uspace,
                                vaddr_t uspace_ptr,
                                vaddr_t* kspace_ptr_out) {
    paddr_t phys_ptr;
    uint flags;

    if (kspace_ptr_out == NULL) {
        return ERR_INVALID_ARGS;
    }
    *kspace_ptr_out = 0;

    /* translate userspace virtual address to physical address */
    status_t res =
            arch_mmu_query(&uspace->arch_aspace, uspace_ptr, &phys_ptr, &flags);
    /*
     * failures can happen if the pointer is invalid or points to a guard page
     * not backed by physical memory (in which case res is ERR_NOT_FOUND).
     */
    if (res) {
        return res;
    }

    EXPECT_EQ(flags, ARCH_MMU_FLAG_PERM_NO_EXECUTE | ARCH_MMU_FLAG_PERM_USER,
              "Shadow call stack must point to non-executable user memory");

    /* translate physical address to kernel virtual address */
    *kspace_ptr_out = (vaddr_t)paddr_to_kvaddr(phys_ptr);
    ASSERT_NE(0, *kspace_ptr_out,
              "Failed to map phys addr to kernel virtual addr");

    return NO_ERROR;
test_abort:
    return ERR_OUT_OF_RANGE;
}
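
/*
 * Illustrative only: a hypothetical helper (not used by the test) sketching
 * how a caller is expected to consume translate_uspace_ptr(): translate
 * first, check the result, and only then dereference the kernel-side
 * pointer. The name read_uspace_word and its signature are examples, not
 * part of the test's API.
 */
__attribute__((unused)) static int read_uspace_word(struct vmm_aspace* uspace,
                                                    vaddr_t uspace_ptr,
                                                    vaddr_t* value_out) {
    vaddr_t kspace_ptr;
    int rc = translate_uspace_ptr(uspace, uspace_ptr, &kspace_ptr);
    if (rc != NO_ERROR) {
        /* e.g. ERR_NOT_FOUND when uspace_ptr falls in an unbacked guard page */
        return rc;
    }
    *value_out = *(vaddr_t*)kspace_ptr;
    return NO_ERROR;
}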

struct scs_test_status {
    /* running apps whose user shadow call stack was inspected */
    uint32_t running_apps;
    /* apps for which one of the shadow call stack checks failed */
    uint32_t invalid_apps;
};

/**
 * trusty_app_callback() - Test that app has a valid user shadow call stack
 *
 * @ta: Application to test
 * @status: Pointer to a structure tracking the current test status.
 */
static void trusty_app_callback(struct trusty_app* ta, void* status) {
    struct scs_test_status* status_ptr = (struct scs_test_status*)status;
    if (strcmp(ta->props.app_name, "userscs-custom") == 0) {
        /* were we able to request a custom shadow stack size? */
        ASSERT_EQ(ta->props.min_shadow_stack_size, 128);
    } else if (strcmp(ta->props.app_name, "userscs-disabled") == 0) {
        /* were we able to opt out of shadow stacks? */
        ASSERT_EQ(ta->props.min_shadow_stack_size, 0);
        /* userscs-* apps loop infinitely so they'll always have a thread */
        ASSERT_NE((void*)ta->thread, NULL, "App has thread");
        ASSERT_EQ((void*)ta->thread->shadow_stack_base, NULL,
                  "Shadow call stack was disabled");
        return;
    } else if (strcmp(ta->props.app_name, "userscs-default") == 0) {
        /* did default scs app get the default shadow stack size? */
        ASSERT_EQ(ta->props.min_shadow_stack_size, DEFAULT_SHADOW_STACK_SIZE,
                  "Expected shadow call stack to have the default size");
    }

    /* size must be a multiple of the pointer size */
    ASSERT_EQ(0, ta->props.min_shadow_stack_size % sizeof(vaddr_t),
              "Shadow call stack size is not a multiple of the pointer size");

    /*
     * Apps that aren't running may not have a thread allocated. Moreover,
     * apps that opt out of shadow call stacks need no further inspection.
     */
    if (ta->state != APP_RUNNING || ta->props.min_shadow_stack_size == 0) {
        return;
    }
    status_ptr->running_apps++;

    struct trusty_thread* tt = ta->thread;
    ASSERT_NE((void*)tt, NULL, "Running trusty app must have a valid thread");
    ASSERT_NE((void*)tt->shadow_stack_base, NULL,
              "Shadow call stack must point to allocation");

    ASSERT_EQ(false, is_kernel_address(tt->shadow_stack_base),
              "Shadow stack on user thread points to kernel memory");

    ASSERT_NE(tt->stack_start, tt->shadow_stack_base,
              "Shadow stack on user thread aliases the regular stack");

    /*
     * Check the shadow stack size by examining the last element and one past
     * the last element. Note that these pointers are valid in the address
     * space of the trusty app, not in the current address space of a kernel
     * app. Therefore, we translate from the app's address space to the kernel
     * address space before dereferencing, lest we generate an access
     * violation.
     */

    vaddr_t past_last = (vaddr_t)tt->shadow_stack_base + tt->shadow_stack_size;
    vaddr_t last_elem = past_last - sizeof(vaddr_t);
    /* a whole number of pages is allocated no matter the shadow stack size */
    vaddr_t pre_first = last_elem - round_up(tt->shadow_stack_size, PAGE_SIZE);
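    /*
     * Worked example (assuming 4 KiB pages and the 128-byte stack requested
     * by userscs-custom): the mapping still spans one whole page ending at
     * past_last = base + 128, so last_elem = base + 120 and
     * pre_first = base + 120 - 4096 falls 8 bytes below the start of the
     * mapping, i.e. inside the guard page expected below the shadow stack.
     */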
    vaddr_t elem_translated, ignored;

    struct vmm_aspace* uspace = ta->aspace;

    ASSERT_EQ(NO_ERROR,
              translate_uspace_ptr(uspace, last_elem, &elem_translated),
              "Actual size of shadow stack differs from recorded size");
    /*
     * Check that the test app uses its shadow stack as expected. The shadow
     * call stack is zero-initialized and once a stack slot is used it can
     * never become zero again.
     */
    if (strcmp(ta->props.app_name, "userscs-default") == 0) {
        EXPECT_EQ(0, *(vaddr_t*)elem_translated,
                  "Expected last element of shadow stack to be zero "
                  "(unused)");

        ASSERT_EQ(NO_ERROR, translate_uspace_ptr(uspace, tt->shadow_stack_base,
                                                 &elem_translated));
        /*
         * The link register is initially zero, so when shadow call stacks are
         * enabled for libc, the second element holds the first non-zero entry.
         */
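        /*
         * Background (illustrative): with Clang's AArch64 shadow call stack
         * instrumentation (-fsanitize=shadow-call-stack), a non-leaf
         * function's prologue typically saves the return address with
         * "str x30, [x18], #8" and its epilogue reloads it with
         * "ldr x30, [x18, #-8]!", x18 being the reserved shadow stack
         * pointer. Hence slot 0 can hold the initial (zero) link register
         * while slot 1 receives the first real return address.
         */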
        EXPECT_NE(0, *(vaddr_t*)(elem_translated + sizeof(vaddr_t)),
                  "Expected second element of shadow stack to be non-zero "
                  "(used)");
    }

    ASSERT_EQ(ERR_NOT_FOUND, translate_uspace_ptr(uspace, past_last, &ignored),
              "Expected guard page after shadow stack on user thread");

    ASSERT_EQ(ERR_NOT_FOUND, translate_uspace_ptr(uspace, pre_first, &ignored),
              "Expected guard page before shadow stack on user thread");

    return;
test_abort:
    status_ptr->invalid_apps++;
}

static int inspect_trusty_threads(void) {
    struct scs_test_status status = {0};
    trusty_app_forall(trusty_app_callback, &status);
    ASSERT_NE(0, status.running_apps);
    return status.invalid_apps;
test_abort:
    return -1;
}
#else
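/*
 * Without user shadow call stacks there is nothing to inspect: the gated
 * test name gets a DISABLED_ prefix and the stub below reports success
 * without examining any apps.
 */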
#define FEATURE_GATED_TEST_NAME(name) DISABLED_##name

static int inspect_trusty_threads(void) {
    return 0;
}

#endif

TEST(userscstest, FEATURE_GATED_TEST_NAME(user_threads_have_scs)) {
    int res = inspect_trusty_threads();
    EXPECT_NE(-1, res, "There were no running apps with threads to inspect");
    EXPECT_EQ(0, res,
              "One or more apps did not have the expected shadow call stack");
}

PORT_TEST(userscstest, "com.android.kernel.userscstest");