/*
 * Copyright (c) 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This module registers SMC handlers that are called by tests running in the
 * client OS. This API is currently only available if lib/sm is enabled.
 */
#if WITH_LIB_SM

#include <arch/arch_ops.h>
#include <arch/ops.h>
#include <err.h>
#include <inttypes.h>
#include <kernel/thread.h>
#include <kernel/vm.h>
#include <lib/sm.h>
#include <lib/sm/sm_err.h>
#include <lib/sm/smcall.h>
#include <lib/smc/smc.h>
#include <limits.h>
#include <lk/init.h>
#include <stdatomic.h>
#include <string.h>
#include <trace.h>

#include "stdcalltest.h"

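/*
 * SMC parameters are 32-bit registers. The helpers below reassemble the
 * 64-bit shared-memory object id from params[0] (low word) and params[1]
 * (high word), and read the buffer size from params[2].
 */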
static ext_mem_obj_id_t args_get_id(struct smc32_args* args) {
    return (((uint64_t)args->params[1] << 32) | args->params[0]);
}

static size_t args_get_sz(struct smc32_args* args) {
    return (size_t)args->params[2];
}

/**
 * stdcalltest_sharedmem_rw - Test shared memory buffer.
 * @client_id:  Id of the client that shared the memory object.
 * @mem_obj_id: Shared memory object id.
 * @size:       Size of the shared buffer in bytes.
 *
 * Check that the buffer contains the 64-bit integer sequence [0, 1, 2, ...,
 * @size / 8 - 1] and modify the sequence to [@size, @size - 1, @size - 2, ...,
 * @size - (@size / 8 - 1)].
 *
 * Return: 0 on success. SM_ERR_INVALID_PARAMETERS if the buffer does not
 * contain the expected input pattern. SM_ERR_INTERNAL_FAILURE if @mem_obj_id
 * could not be mapped.
 */
static long stdcalltest_sharedmem_rw(ext_mem_client_id_t client_id,
                                     ext_mem_obj_id_t mem_obj_id,
                                     size_t size) {
    struct vmm_aspace* aspace = vmm_get_kernel_aspace();
    status_t ret;
    long status;
    void* va;
    uint64_t* va64;

    if (!IS_PAGE_ALIGNED(size)) {
        return SM_ERR_INVALID_PARAMETERS;
    }

    ret = ext_mem_map_obj_id(aspace, "stdcalltest", client_id, mem_obj_id, 0, 0,
                             size, &va, PAGE_SIZE_SHIFT, 0,
                             ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret != NO_ERROR) {
        status = SM_ERR_INTERNAL_FAILURE;
        goto err_map;
    }
    va64 = va;

    for (size_t i = 0; i < size / sizeof(*va64); i++) {
        if (va64[i] != i) {
            TRACEF("input mismatch at %zd, got 0x%" PRIx64
                   " instead of 0x%zx\n",
                   i, va64[i], i);
            status = SM_ERR_INVALID_PARAMETERS;
            goto err_input_mismatch;
        }
        va64[i] = size - i;
    }
    status = 0;

err_input_mismatch:
    ret = vmm_free_region(aspace, (vaddr_t)va);
    if (ret) {
        status = SM_ERR_INTERNAL_FAILURE;
    }
err_map:
    return status;
}

#if ARCH_ARM64
long clobber_sve_asm(uint32_t byte_clobber);
long load_sve_asm(uint8_t* arr, uint64_t len);

#define SVE_VEC_LEN_BITS 128
#define SVE_NB_BYTE_VEC_LEN (SVE_VEC_LEN_BITS / 8)
#define SVE_SVE_REGS_COUNT 32

#define SMC_FC_TRNG_VERSION SMC_FASTCALL_NR(SMC_ENTITY_STD, 0x50)

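/*
 * Per-CPU buffer used by the SVE clobber test: load_sve_asm() fills it with
 * the contents of the 32 SVE vector registers (SVE_VEC_LEN_BITS bits each)
 * so they can be compared against the expected clobber pattern below.
 */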
static uint8_t sve_regs[SMP_MAX_CPUS][SVE_SVE_REGS_COUNT * SVE_NB_BYTE_VEC_LEN]
        __attribute__((aligned(16)));

enum clobber_restore_error {
    SVE_NO_ERROR = 0,
    SVE_GENERIC_ERROR = 1,
    SVE_REGISTER_NOT_RESTORED = 2,
    SVE_ERROR_LONG_TYPE = LONG_MAX
};

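/**
 * stdcalltest_clobber_sve - Std call handler for the SVE clobber test.
 * @args: SMC arguments; params[0] holds the byte pattern and params[1] the
 *        call number (1 means this is the first call on the CPU).
 *
 * On the first call for a CPU, fill the SVE registers with the given byte
 * pattern; on every call, read the registers back and verify the pattern is
 * still present, so the test can detect SVE state being lost across calls.
 *
 * Return: SVE_NO_ERROR on success, another enum clobber_restore_error value
 * on failure.
 */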
long stdcalltest_clobber_sve(struct smc32_args* args) {
    enum clobber_restore_error ret = SVE_NO_ERROR;
    if (!arch_sve_supported()) {
        /* Without SVE there is nothing to assert; this is not an error, so
         * the test trivially passes. */
        return ret;
    }

    uint64_t v_cpacr_el1 = arch_enable_sve();
    uint cpuid = arch_curr_cpu_num();
    long call_nb = args->params[1];

    /* The first call on each CPU needs to clobber the SVE registers */
    if (call_nb == 1) {
        ret = clobber_sve_asm(args->params[0]);
        if (ret != SVE_NO_ERROR) {
            panic("Failed to Clobber ARM SVE registers: %lx\n", ret);
            ret = SVE_GENERIC_ERROR;
            goto end_stdcalltest_clobber_sve;
        }
    }

    /* Make sure registers are as expected */
    const uint8_t EXPECTED = (uint8_t)args->params[0];
    ret = load_sve_asm(sve_regs[cpuid], SVE_NB_BYTE_VEC_LEN);
    if (ret != SVE_NO_ERROR) {
        panic("Failed to Load ARM SVE registers: %lx\n", ret);
        ret = SVE_GENERIC_ERROR;
        goto end_stdcalltest_clobber_sve;
    }

    for (size_t idx = 0; idx < countof(sve_regs[cpuid]); ++idx) {
        uint8_t val = sve_regs[cpuid][idx];

        if (val != EXPECTED) {
            ret = SVE_REGISTER_NOT_RESTORED;
            goto end_stdcalltest_clobber_sve;
        }
    }

end_stdcalltest_clobber_sve:
    ARM64_WRITE_SYSREG(cpacr_el1, v_cpacr_el1);
    return ret;
}

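/*
 * Compute a CPACR_EL1 value that enables FP/SIMD at EL1 by setting the FPEN
 * field (bits 21:20) to 0b11. Returns SM_ERR_NOT_ALLOWED if FP/SIMD access
 * is already enabled (bit 20 of FPEN set), since the FP/SIMD clobber tests
 * expect the FPU to be off when they are entered.
 */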
static long stdcalltest_compute_fpacr(uint64_t* old_cpacr,
                                      uint64_t* new_cpacr) {
    uint64_t cpacr = ARM64_READ_SYSREG(cpacr_el1);

    DEBUG_ASSERT(old_cpacr);
    DEBUG_ASSERT(new_cpacr);

    if ((cpacr >> 20) & 1) {
        return SM_ERR_NOT_ALLOWED;
    }

    *old_cpacr = cpacr;
    *new_cpacr = cpacr | (3 << 20);
    return 0;
}

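/*
 * Small non-cryptographic PRNG used only to generate test patterns for the
 * FP/SIMD clobber test; it advances a shared seed with the 32-bit
 * MurmurHash3 finalizer inside a lock-free compare-and-swap loop.
 */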
static uint32_t stdcalltest_random_u32(void) {
    /* Initialize the RNG seed to the 32-bit golden-ratio constant */
    static atomic_int hash = 0x9e3779b1U;
    int oldh, newh;

    /* Advance the RNG with the MurmurHash3 finalizer */
    do {
        newh = oldh = atomic_load(&hash);
        newh ^= newh >> 16;
        __builtin_mul_overflow(newh, 0x85ebca6bU, &newh);
        newh ^= newh >> 13;
        __builtin_mul_overflow(newh, 0xc2b2ae35U, &newh);
        newh ^= newh >> 16;
    } while (!atomic_compare_exchange_weak(&hash, &oldh, newh));

    return (uint32_t)oldh;
}

static struct fpstate stdcalltest_random_fpstate;

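/*
 * Fast call handler for the first half of the FP/SIMD clobber test: fill
 * stdcalltest_random_fpstate with random data and force-load it into the
 * FP/SIMD registers so that a subsequent check call can verify the registers
 * were preserved.
 */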
static long stdcalltest_clobber_fpsimd_clobber(struct smc32_args* args) {
    long ret;
    uint64_t old_cpacr, new_cpacr;
    bool loaded;

    /*
     * Check if the FPU at EL1 is already on;
     * it shouldn't be, so return an error if it is.
     * Otherwise, save the old value and restore it
     * after we're done.
     */
    ret = stdcalltest_compute_fpacr(&old_cpacr, &new_cpacr);
    if (ret) {
        return ret;
    }

    for (size_t i = 0; i < countof(stdcalltest_random_fpstate.regs); i++) {
        stdcalltest_random_fpstate.regs[i] =
                ((uint64_t)stdcalltest_random_u32() << 32) |
                stdcalltest_random_u32();
    }
    /*
     * TODO: set FPCR&FPSR to random values, but they need to be masked
     * because many of their bits are MBZ
     */
    stdcalltest_random_fpstate.fpcr = 0;
    stdcalltest_random_fpstate.fpsr = 0;

    ARM64_WRITE_SYSREG(cpacr_el1, new_cpacr);
    loaded = arm64_fpu_load_fpstate(&stdcalltest_random_fpstate, true);
    ARM64_WRITE_SYSREG(cpacr_el1, old_cpacr);
    return loaded ? 0 : SM_ERR_INTERNAL_FAILURE;
}

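/*
 * Fast call handler for the second half of the FP/SIMD clobber test: read
 * back the current FP/SIMD registers and compare them against the random
 * state installed by the clobber call. Returns SM_ERR_BUSY if another
 * thread's FP state was loaded in the meantime.
 */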
static long stdcalltest_clobber_fpsimd_check(struct smc32_args* args) {
    long ret;
    uint64_t old_cpacr, new_cpacr;
    struct fpstate new_fpstate;
    bool loaded;

    ret = stdcalltest_compute_fpacr(&old_cpacr, &new_cpacr);
    if (ret) {
        return ret;
    }

    ARM64_WRITE_SYSREG(cpacr_el1, new_cpacr);
    loaded = arm64_fpu_load_fpstate(&stdcalltest_random_fpstate, false);
    arm64_fpu_save_fpstate(&new_fpstate);
    ARM64_WRITE_SYSREG(cpacr_el1, old_cpacr);

    if (loaded) {
        /*
         * Check whether the current fpstate is still the one set
         * earlier by the clobber. If not, it means another thread
         * ran and overwrote our registers, and we do not want to
         * leak them here.
         */
        ret = SM_ERR_BUSY;
        goto err;
    }

    for (size_t i = 0; i < countof(new_fpstate.regs); i++) {
        if (new_fpstate.regs[i] != stdcalltest_random_fpstate.regs[i]) {
            TRACEF("regs[%zu] mismatch: %" PRIx64 " != %" PRIx64 "\n", i,
                   new_fpstate.regs[i], stdcalltest_random_fpstate.regs[i]);
            ret = SM_ERR_INTERNAL_FAILURE;
            goto err;
        }
    }
    if (new_fpstate.fpcr != stdcalltest_random_fpstate.fpcr) {
        TRACEF("FPCR mismatch: %" PRIx32 " != %" PRIx32 "\n", new_fpstate.fpcr,
               stdcalltest_random_fpstate.fpcr);
        ret = SM_ERR_INTERNAL_FAILURE;
        goto err;
    }
    if (new_fpstate.fpsr != stdcalltest_random_fpstate.fpsr) {
        TRACEF("FPSR mismatch: %" PRIx32 " != %" PRIx32 "\n", new_fpstate.fpsr,
               stdcalltest_random_fpstate.fpsr);
        ret = SM_ERR_INTERNAL_FAILURE;
        goto err;
    }

    /* Return 0 on success */
    ret = 0;

err:
    return ret;
}
#endif

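/* Dispatcher for the standard call test SMCs registered by this module */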
static long stdcalltest_stdcall(struct smc32_args* args) {
    switch (args->smc_nr) {
    case SMC_SC_TEST_VERSION:
        return TRUSTY_STDCALLTEST_API_VERSION;
    case SMC_SC_TEST_SHARED_MEM_RW:
        return stdcalltest_sharedmem_rw(args->client_id, args_get_id(args),
                                        args_get_sz(args));
#if ARCH_ARM64
    case SMC_SC_TEST_CLOBBER_SVE: {
        return stdcalltest_clobber_sve(args);
    }
#endif
    default:
        return SM_ERR_UNDEFINED_SMC;
    }
}

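/* Dispatcher for the fast call test SMCs registered by this module */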
static long stdcalltest_fastcall(struct smc32_args* args) {
    switch (args->smc_nr) {
#if ARCH_ARM64
    case SMC_FC_TEST_CLOBBER_FPSIMD_CLOBBER:
        return stdcalltest_clobber_fpsimd_clobber(args);
    case SMC_FC_TEST_CLOBBER_FPSIMD_CHECK:
        return stdcalltest_clobber_fpsimd_check(args);
#else
    /* This test is a no-op on other architectures, e.g., arm32 */
    case SMC_FC_TEST_CLOBBER_FPSIMD_CLOBBER:
    case SMC_FC_TEST_CLOBBER_FPSIMD_CHECK:
        return 0;
#endif
    default:
        return SM_ERR_UNDEFINED_SMC;
    }
}

static struct smc32_entity stdcalltest_sm_entity = {
        .stdcall_handler = stdcalltest_stdcall,
        .fastcall_handler = stdcalltest_fastcall,
};

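/* Register the test entity with lib/sm once kernel services are up */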
static void stdcalltest_init(uint level) {
    int err;

    err = sm_register_entity(SMC_ENTITY_TEST, &stdcalltest_sm_entity);
    if (err) {
        printf("stdcalltest: failed to register SMC entity: %d\n", err);
    }
}
LK_INIT_HOOK(stdcalltest, stdcalltest_init, LK_INIT_LEVEL_APPS);

#endif
