/*
 * Copyright (c) 2013, Google, Inc. All rights reserved
 * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <debug.h>
#include <err.h>
#include <kernel/mutex.h>
#include <kernel/thread.h>
#include <kernel/usercopy.h>
#include <lk/macros.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <uapi/mm.h>

#include <lib/trusty/memref.h>
#include <lib/trusty/sys_fd.h>
#include <lib/trusty/trusty_app.h>
#include <lib/trusty/uctx.h>
#include <lib/trusty/uio.h>
#include <platform.h>
#if LK_LIBC_IMPLEMENTATION_IS_MUSL
#include <trusty/io_handle.h>
#endif

#include "util.h"

#define LOCAL_TRACE 0

static ssize_t sys_std_writev(uint32_t fd,
                              user_addr_t iov_uaddr,
                              uint32_t iov_cnt);

static mutex_t fd_lock = MUTEX_INITIAL_VALUE(fd_lock);

static const struct sys_fd_ops sys_std_fd_op = {
        .writev = sys_std_writev,
};

static struct sys_fd_ops const* sys_fds[MAX_SYS_FD_HADLERS] = {
        [1] = &sys_std_fd_op, /* stdout */
        [2] = &sys_std_fd_op, /* stderr */
};

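/*
 * Register a syscall-level fd handler in the static sys_fds table. Slots 1
 * and 2 are pre-wired to sys_std_fd_op above; any other slot below
 * MAX_SYS_FD_HADLERS can be claimed exactly once. A minimal usage sketch
 * follows (the ops instance and fd number are hypothetical, not part of
 * this file):
 *
 *   static const struct sys_fd_ops example_fd_ops = {
 *       .writev = example_writev,  // assumed handler implemented elsewhere
 *   };
 *
 *   // e.g. from a driver init hook; fd 3 is an arbitrary free slot
 *   status_t rc = install_sys_fd_handler(3, &example_fd_ops);
 *   if (rc == ERR_ALREADY_EXISTS) { ... }  // slot already claimed
 */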
status_t install_sys_fd_handler(uint32_t fd, const struct sys_fd_ops* ops) {
    status_t ret;

    if (fd >= countof(sys_fds))
        return ERR_INVALID_ARGS;

    mutex_acquire(&fd_lock);
    if (!sys_fds[fd]) {
        sys_fds[fd] = ops;
        ret = NO_ERROR;
    } else {
        ret = ERR_ALREADY_EXISTS;
    }
    mutex_release(&fd_lock);
    return ret;
}

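/*
 * Resolve the ops for an fd. Per-context handlers installed through the
 * uctx layer take precedence; the static sys_fds table is only consulted
 * as a fallback, and NULL is returned for unknown fds.
 */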
static const struct sys_fd_ops* get_sys_fd_handler(uint32_t fd) {
    const struct sys_fd_ops* ops;

    ops = uctx_get_fd_ops(fd);
    if (ops)
        return ops;

    if (fd >= countof(sys_fds))
        return NULL;

    return sys_fds[fd];
}

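/*
 * Best-effort check that every page overlapped by [addr, addr + size) is a
 * user address currently backed by a physical page. This is a point-in-time
 * walk of the page tables, not a lock against later unmapping.
 */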
static bool valid_address(vaddr_t addr, const u_int size) {
    u_int rsize = round_up(size + (addr & (PAGE_SIZE - 1)), PAGE_SIZE);
    addr = round_down(addr, PAGE_SIZE);

    /* Ensure size did not overflow */
    if (rsize < size) {
        return false;
    }

    while (rsize) {
        if (!is_user_address(addr) || !vaddr_to_paddr((void*)addr)) {
            return false;
        }
        addr += PAGE_SIZE;
        rsize -= PAGE_SIZE;
    }

    return true;
}

/* handle stdout/stderr */
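/*
 * Copies the caller's iovec payload into a small on-stack buffer in
 * 128-byte chunks and forwards it to the fd's io_handle. When the log
 * level suppresses output, the iovecs are still walked so the returned
 * byte count (or error) matches what the caller would otherwise see.
 */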
static ssize_t sys_std_writev(uint32_t fd,
                              user_addr_t iov_uaddr,
                              uint32_t iov_cnt) {
    /*
     * Even if we're suppressing the output, we need to process the data to
     * produce the correct return code.
     */
    bool should_output = INFO <= LK_LOGLEVEL;
    io_handle_t* io_handle = fd_io_handle(fd);
    if (io_handle == NULL) {
        return ERR_BAD_HANDLE;
    }
    uint8_t buf[128];

    if (should_output) {
        io_lock(io_handle);
    }

    struct iovec_iter iter = iovec_iter_create(iov_cnt);
    size_t total_bytes = 0;
    int ret;

    while (iovec_iter_has_next(&iter)) {
        ret = user_iovec_to_membuf_iter(buf, sizeof(buf), iov_uaddr, &iter);
        if (ret < 0) {
            goto write_done;
        }
        total_bytes += ret;
        if (should_output) {
            ret = io_write(io_handle, (const void*)buf, ret);
            if (ret < 0) {
                goto write_done;
            }
        }
    }
    ret = total_bytes;

write_done:
    if (should_output) {
        io_write_commit(io_handle);
        io_unlock(io_handle);
    }
    return ret;
}

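/*
 * sys_writev, sys_readv and sys_ioctl below share the same shape: look up
 * the fd's ops via get_sys_fd_handler() and dispatch if the corresponding
 * hook exists, otherwise fail with ERR_NOT_SUPPORTED.
 */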
long sys_writev(uint32_t fd, user_addr_t iov_uaddr, uint32_t iov_cnt) {
    const struct sys_fd_ops* ops = get_sys_fd_handler(fd);

    if (ops && ops->writev)
        return ops->writev(fd, iov_uaddr, iov_cnt);

    return ERR_NOT_SUPPORTED;
}

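/*
 * Program-break handling for the app heap. A NULL argument queries the
 * current break; the first non-NULL call lazily maps the whole
 * [start_brk, end_brk) window with VMM_FLAG_VALLOC_SPECIFIC, and later
 * calls simply move cur_brk within that window. Out-of-range requests are
 * ignored and the unchanged break is returned.
 */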
void* sys_brk(void* u_brk) {
    vaddr_t brk = (vaddr_t)u_brk;
    struct trusty_app* trusty_app = current_trusty_app();
    if (!brk)
        return (void*)trusty_app->cur_brk;
    /* check if this is the first sbrk */
    if (!trusty_app->used_brk) {
        uint vmm_flags = VMM_FLAG_QUOTA;
        status_t ret;
        size_t size = round_up(trusty_app->end_brk - trusty_app->start_brk,
                               PAGE_SIZE);
        vmm_flags |= VMM_FLAG_VALLOC_SPECIFIC;
        ret = vmm_alloc(
                trusty_app->aspace, "brk_heap", size,
                (void*)&trusty_app->start_brk, 0, vmm_flags,
                ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
        if (ret) {
            TRACEF("sbrk heap allocation failed!\n");
            return (void*)trusty_app->cur_brk;
        }
        trusty_app->used_brk = true;
    }

    /* update brk, if within range */
    if ((brk >= trusty_app->start_brk) && (brk <= trusty_app->end_brk)) {
        trusty_app->cur_brk = brk;
    }
    return (void*)trusty_app->cur_brk;
}

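/*
 * Terminate the calling app. The status is forwarded to trusty_app_exit();
 * the flags argument is currently unused in this implementation.
 */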
long sys_exit_etc(int32_t status, uint32_t flags) {
    thread_t* current = get_current_thread();
    LTRACEF("exit called, thread %p, name %s\n", current, current->name);
    trusty_app_exit(status);
    return 0L;
}

long sys_readv(uint32_t fd, user_addr_t iov_uaddr, uint32_t iov_cnt) {
    const struct sys_fd_ops* ops = get_sys_fd_handler(fd);

    if (ops && ops->readv)
        return ops->readv(fd, iov_uaddr, iov_cnt);

    return ERR_NOT_SUPPORTED;
}

long sys_ioctl(uint32_t fd, uint32_t req, user_addr_t user_ptr) {
    const struct sys_fd_ops* ops = get_sys_fd_handler(fd);

    if (ops && ops->ioctl)
        return ops->ioctl(fd, req, user_ptr);

    return ERR_NOT_SUPPORTED;
}

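/*
 * When a 32-bit userspace runs on a 64-bit kernel, the 64-bit sleep time
 * arrives split across two 32-bit registers and is reassembled here, e.g.:
 *
 *   sleep_time_l = 0x00000500, sleep_time_h = 0x00000001
 *   sleep_time   = 0x00000500 + (0x00000001ULL << 32) = 0x0000000100000500
 *
 * clock_id and flags are currently ignored by both variants.
 */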
#if IS_64BIT && USER_32BIT
long sys_nanosleep(uint32_t clock_id,
                   uint32_t flags,
                   uint32_t sleep_time_l,
                   uint32_t sleep_time_h) {
    uint64_t sleep_time = sleep_time_l + ((uint64_t)sleep_time_h << 32);
    thread_sleep_ns(sleep_time);

    return NO_ERROR;
}
#else
long sys_nanosleep(uint32_t clock_id, uint32_t flags, uint64_t sleep_time) {
    thread_sleep_ns(sleep_time);

    return NO_ERROR;
}
#endif

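/*
 * Copy the current time, in nanoseconds, to the user-supplied int64_t
 * pointer. clock_id and flags are currently ignored; the syscall's return
 * value is the copy_to_user() status, not the time itself.
 */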
long sys_gettime(uint32_t clock_id, uint32_t flags, user_addr_t time) {
    // return time in nanoseconds
    lk_time_ns_t t = current_time_ns();

    return copy_to_user(time, &t, sizeof(int64_t));
}

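/*
 * sys_mmap() dispatches on flags into three paths:
 *  - MMAP_FLAG_IO_HANDLE: map the MMIO region named by handle_id via
 *    trusty_app_setup_mmio(); uaddr must be 0.
 *  - MMAP_FLAG_ANONYMOUS: allocate anonymous memory via vmm_alloc(),
 *    optionally at a fixed address (MMAP_FLAG_FIXED_NOREPLACE) or without
 *    backing pages (MMAP_FLAG_NO_PHYSICAL).
 *  - otherwise: treat handle_id as an IPC handle and delegate to
 *    handle_mmap().
 * On success the mapped user address is returned, otherwise a negative
 * error code.
 */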
long sys_mmap(user_addr_t uaddr,
              uint32_t size,
              uint32_t flags,
              uint32_t handle_id) {
    struct trusty_app* trusty_app = current_trusty_app();
    long ret;

    if (flags & MMAP_FLAG_IO_HANDLE) {
        /*
         * Only allow mapping the IO region specified by handle_id; uaddr
         * must be 0 for now.
         * TBD: Add support to use uaddr as a hint.
         */
        if (uaddr != 0 || flags & MMAP_FLAG_ANONYMOUS) {
            return ERR_INVALID_ARGS;
        }

        ret = trusty_app_setup_mmio(trusty_app, handle_id, &uaddr, size);
        if (ret != NO_ERROR) {
            return ret;
        }

        return uaddr;
    } else if (flags & MMAP_FLAG_ANONYMOUS) {
        /*
         * As above, uaddr must be 0 for now unless MMAP_FLAG_FIXED_NOREPLACE
         * is set.
         * TBD: Add support to use uaddr as a hint.
         */
        if (uaddr != 0 && !(flags & MMAP_FLAG_FIXED_NOREPLACE)) {
            return ERR_INVALID_ARGS;
        }

        uint32_t mmu_flags = 0;
        ret = xlat_flags(flags, flags, &mmu_flags);
        if (ret != NO_ERROR) {
            LTRACEF("error translating memory protection flags in mmap\n");
            return ret;
        }

        vaddr_t vaddr = uaddr;
        void* ptr = (void*)vaddr;
        uint vmm_flags = VMM_FLAG_QUOTA;
        if (flags & MMAP_FLAG_FIXED_NOREPLACE) {
            if (!uaddr) {
                LTRACEF("a fixed allocation requires a non-NULL pointer\n");
                return ERR_INVALID_ARGS;
            }
            vmm_flags |= VMM_FLAG_VALLOC_SPECIFIC;
        }
        if (flags & MMAP_FLAG_NO_PHYSICAL) {
            if (!(flags & MMAP_FLAG_PROT_WRITE)) {
                LTRACEF("a NO_PHYSICAL allocation must allow write access\n");
                return ERR_INVALID_ARGS;
            }
            vmm_flags |= VMM_FLAG_NO_PHYSICAL;
            if (uaddr) {
                LTRACEF("a NO_PHYSICAL allocation cannot be specific\n");
                return ERR_INVALID_ARGS;
            }
        }
        ret = vmm_alloc(trusty_app->aspace, "mmap", size, &ptr, 0, vmm_flags,
                        mmu_flags);
        if (ret != NO_ERROR) {
            LTRACEF("error mapping anonymous region\n");
            return ret;
        }

        return (long)ptr;
    } else {
        struct handle* handle;
        ret = uctx_handle_get(current_uctx(), handle_id, &handle);
        if (ret != NO_ERROR) {
            LTRACEF("mmapped nonexistent handle\n");
            return ret;
        }

        ret = handle_mmap(handle, 0, size, flags, &uaddr);
        handle_decref(handle);
        if (ret != NO_ERROR) {
            LTRACEF("handle_mmap failed\n");
            return ret;
        }

        return uaddr;
    }
}

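/*
 * Unmap a region previously created through sys_mmap(). The uaddr/size pair
 * is passed straight to vmm_free_region_etc(), which operates on whole
 * regions (see the note below).
 */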
long sys_munmap(user_addr_t uaddr, uint32_t size) {
    struct trusty_app* trusty_app = current_trusty_app();

    /*
     * vmm_free_region_etc always unmaps the whole region.
     * TBD: Add support to unmap a partial region when there's a use case.
     */
    return vmm_free_region_etc(trusty_app->aspace, uaddr, size, 0);
}

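/*
 * Prepare a user buffer for DMA: take a reference on the backing vmm_obj
 * slice, fill the caller's struct dma_pmem array with physically contiguous
 * runs (unless DMA_FLAG_NO_PMEM), perform the cache maintenance implied by
 * the direction flags, and record the range via trusty_app_allow_dma_range().
 * Returns the number of dma_pmem entries written, or a negative error.
 */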
long sys_prepare_dma(user_addr_t uaddr,
                     uint32_t size,
                     uint32_t flags,
                     user_addr_t pmem) {
    struct dma_pmem kpmem;
    size_t mapped_size = 0;
    uint32_t entries = 0;
    long ret;
    vaddr_t vaddr = uaddr;

    LTRACEF("uaddr 0x%" PRIxPTR_USER
            ", size 0x%x, flags 0x%x, pmem 0x%" PRIxPTR_USER "\n",
            uaddr, size, flags, pmem);

    if (size == 0)
        return ERR_INVALID_ARGS;

    if ((flags & DMA_FLAG_NO_PMEM) && pmem)
        return ERR_INVALID_ARGS;

    struct trusty_app* trusty_app = current_trusty_app();
    struct vmm_obj_slice slice;
    vmm_obj_slice_init(&slice);

    ret = vmm_get_obj(trusty_app->aspace, vaddr, size, &slice);
    if (ret != NO_ERROR)
        return ret;

    if (!slice.obj || !slice.obj->ops) {
        ret = ERR_NOT_VALID;
        goto err;
    }

    /* Only build the pmem table if the caller wants physical addresses back */
    if (flags & DMA_FLAG_NO_PMEM) {
        mapped_size = size;
    } else {
        do {
            paddr_t paddr;
            size_t paddr_size;
            ret = slice.obj->ops->get_page(
                    slice.obj, slice.offset + mapped_size, &paddr, &paddr_size);
            if (ret != NO_ERROR)
                goto err;

            memset(&kpmem, 0, sizeof(kpmem));
            kpmem.paddr = paddr;
            kpmem.size = MIN(size - mapped_size, paddr_size);

            /*
             * Here, kpmem.size is either the remaining mapping size
             * (size - mapped_size) or the distance to a page boundary that
             * is not physically contiguous with the next page mapped in the
             * given virtual address range. In either case it marks the end
             * of the current kpmem record.
             */

            ret = copy_to_user(pmem, &kpmem, sizeof(struct dma_pmem));
            if (ret != NO_ERROR)
                goto err;

            pmem += sizeof(struct dma_pmem);

            mapped_size += kpmem.size;
            entries++;

        } while (mapped_size < size && (flags & DMA_FLAG_MULTI_PMEM));
    }

    if (flags & DMA_FLAG_FROM_DEVICE)
        arch_clean_invalidate_cache_range(vaddr, mapped_size);
    else
        arch_clean_cache_range(vaddr, mapped_size);

    if (!(flags & DMA_FLAG_ALLOW_PARTIAL) && mapped_size != size) {
        ret = ERR_BAD_LEN;
        goto err;
    }

    ret = trusty_app_allow_dma_range(trusty_app, slice.obj, slice.offset,
                                     slice.size, vaddr, flags);
    if (ret != NO_ERROR) {
        goto err;
    }

    ret = entries; /* fallthrough */
err:
    vmm_obj_slice_release(&slice);
    return ret;
}

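/*
 * Counterpart to sys_prepare_dma(): re-validate the buffer, invalidate the
 * cache for device-to-memory transfers, and tear down the DMA range that
 * was registered for this virtual address range.
 */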
long sys_finish_dma(user_addr_t uaddr, uint32_t size, uint32_t flags) {
    LTRACEF("uaddr 0x%" PRIxPTR_USER ", size 0x%x, flags 0x%x\n", uaddr, size,
            flags);

    /* check buffer is in task's address space */
    if (!valid_address((vaddr_t)uaddr, size))
        return ERR_INVALID_ARGS;

    if (flags & DMA_FLAG_FROM_DEVICE)
        arch_clean_invalidate_cache_range(uaddr, size);

    /*
     * Check that app prepared dma on the provided virtual address range.
     * Returns ERR_NOT_FOUND if the range wasn't found. One way this can
     * happen is when an app finishes a dma range that it didn't prepare.
     */
    return trusty_app_destroy_dma_range((vaddr_t)uaddr, size);
}

long sys_set_user_tls(user_addr_t uaddr) {
    arch_set_user_tls(uaddr);
    return NO_ERROR;
}

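/*
 * Create a memref handle covering [uaddr, uaddr + size) of the calling
 * app's address space and install it into the app's handle table. Returns
 * the new handle id on success, or a negative error code. mmap_prot is
 * passed through to memref_create_from_aspace().
 */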
long sys_memref_create(user_addr_t uaddr,
                       user_size_t size,
                       uint32_t mmap_prot) {
    struct trusty_app* app = current_trusty_app();
    struct handle* handle;
    handle_id_t id;
    status_t rc = memref_create_from_aspace(app->aspace, uaddr, size, mmap_prot,
                                            &handle);
    if (rc) {
        LTRACEF("failed to create memref\n");
        return rc;
    }

    int rc_uctx = uctx_handle_install(current_uctx(), handle, &id);
    /*
     * uctx_handle_install takes a reference to the handle, so we release
     * ours now. If it failed, this will release it. If it succeeded, this
     * prevents us from leaking when the application is destroyed.
     */
    handle_decref(handle);
    if (rc_uctx) {
        LTRACEF("failed to install handle\n");
        return rc_uctx;
    }

    LTRACEF("memref created: %d\n", id);
    return id;
}