1 /*
2 * Copyright (c) 2020 LK Trusty Authors. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <err.h>
25 #include <kernel/vm.h>
26 #include <kernel/physmem.h>
27 #include <trace.h>
28 #include <inttypes.h>
29
/* Set to 1 to enable the verbose LTRACEF logging in this file. */
#define LOCAL_TRACE 0

/* Forward declarations for the vmm_obj operation callbacks defined below. */
static int phys_mem_obj_check_flags(struct vmm_obj* obj, uint* arch_mmu_flags);
static int phys_mem_obj_get_page(struct vmm_obj* obj,
                                 size_t offset,
                                 paddr_t* paddr,
                                 size_t* paddr_size);
static void phys_mem_obj_destroy(struct vmm_obj* vmm_obj);

/*
 * Operation table shared by every phys_mem_obj; installed into
 * obj->vmm_obj.ops when the object is initialized.
 */
static struct vmm_obj_ops phys_mem_obj_ops = {
        .check_flags = phys_mem_obj_check_flags,
        .get_page = phys_mem_obj_get_page,
        .destroy = phys_mem_obj_destroy,
};
44
/* Recover the enclosing phys_mem_obj from its embedded vmm_obj member. */
static struct phys_mem_obj* phys_mem_obj_from_vmm_obj(struct vmm_obj* vmm_obj) {
    return containerof(vmm_obj, struct phys_mem_obj, vmm_obj);
}
48
/**
 * phys_mem_obj_dynamic_initialize() - initialize a phys_mem_obj with a
 * caller-supplied destroy callback.
 * @obj:            Object to initialize. Caller owns the storage.
 * @ref:            Initial reference recorded by obj_init().
 * @paddr:          Page-aligned physical start address of the region.
 * @size:           Page-aligned size of the region in bytes.
 * @arch_mmu_flags: Mapping attributes for the region. Only cache attributes,
 *                  RO, no-execute and NS bits are permitted.
 * @destroy_fn:     Callback invoked when the last reference is dropped.
 *                  Must not be NULL.
 */
void phys_mem_obj_dynamic_initialize(struct phys_mem_obj* obj,
                                     struct obj_ref* ref,
                                     paddr_t paddr,
                                     size_t size,
                                     uint arch_mmu_flags,
                                     void (*destroy_fn)(struct phys_mem_obj*)) {
    /* Bits a caller is allowed to request; anything else is a bug. */
    const uint allowed_flags = ARCH_MMU_FLAG_CACHE_MASK |
                               ARCH_MMU_FLAG_PERM_RO |
                               ARCH_MMU_FLAG_PERM_NO_EXECUTE |
                               ARCH_MMU_FLAG_NS;

    DEBUG_ASSERT(destroy_fn);
    DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
    DEBUG_ASSERT((arch_mmu_flags & ~allowed_flags) == 0);

    /* Fully populate the object before publishing the first reference. */
    obj->paddr = paddr;
    obj->size = size;
    obj->arch_mmu_flags = arch_mmu_flags;
    obj->destroy_fn = destroy_fn;
    obj->vmm_obj.ops = &phys_mem_obj_ops;
    obj_init(&obj->vmm_obj.obj, ref);
}
72
/*
 * Default destroy callback used by phys_mem_obj_initialize(). Objects
 * initialized that way are not expected to be destroyed, so reaching this
 * only logs a warning rather than freeing anything.
 */
static void phys_mem_obj_default_destroy(struct phys_mem_obj* obj) {
    TRACEF("Warning: illegally destroy phys_obj %p\n", obj);
}
76
/**
 * phys_mem_obj_initialize() - initialize a phys_mem_obj with the default
 * (warn-only) destroy callback.
 * @obj:            Object to initialize. Caller owns the storage.
 * @ref:            Initial reference recorded on the object.
 * @paddr:          Page-aligned physical start address of the region.
 * @size:           Page-aligned size of the region in bytes.
 * @arch_mmu_flags: Mapping attributes; see phys_mem_obj_dynamic_initialize().
 */
void phys_mem_obj_initialize(struct phys_mem_obj* obj,
                             struct obj_ref* ref,
                             paddr_t paddr,
                             size_t size,
                             uint arch_mmu_flags) {
    phys_mem_obj_dynamic_initialize(obj, ref, paddr, size, arch_mmu_flags,
                                    phys_mem_obj_default_destroy);
}
85
/*
 * vmm_obj check_flags callback: validate the caller's requested mapping
 * flags against the object's own attributes, then merge the object's
 * attributes into *arch_mmu_flags.
 *
 * Returns 0 on success, ERR_ACCESS_DENIED if the request exceeds the
 * object's permissions, or ERR_INVALID_ARGS if the caller supplied bits
 * (NS, cache attributes) that must come from the object itself.
 */
static int phys_mem_obj_check_flags(struct vmm_obj* obj,
                                    uint* arch_mmu_flags) {
    struct phys_mem_obj* phys = phys_mem_obj_from_vmm_obj(obj);
    uint requested = *arch_mmu_flags;

    LTRACEF("obj arch_mmu_flags 0x%x, arch_mmu_flags 0x%x\n",
            phys->arch_mmu_flags, requested);

    /* A read-only object cannot satisfy a writable mapping request. */
    if ((phys->arch_mmu_flags & ARCH_MMU_FLAG_PERM_RO) &&
        !(requested & ARCH_MMU_FLAG_PERM_RO)) {
        TRACEF("rw access denied. arch_mmu_flags=0x%x, phys_obj->flags=0x%x\n",
               requested, phys->arch_mmu_flags);
        return ERR_ACCESS_DENIED;
    }

    /* A no-execute object cannot satisfy an executable mapping request. */
    if ((phys->arch_mmu_flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) &&
        !(requested & ARCH_MMU_FLAG_PERM_NO_EXECUTE)) {
        TRACEF("exec access denied. arch_mmu_flags=0x%x, phys_obj->flags=0x%x\n",
               requested, phys->arch_mmu_flags);
        return ERR_ACCESS_DENIED;
    }

    /* NS and cache attributes are properties of the object, not the caller. */
    if (requested & ARCH_MMU_FLAG_NS) {
        TRACEF("ARCH_MMU_FLAG_NS should come from vmm_obj, not from caller\n");
        return ERR_INVALID_ARGS;
    }
    if (requested & ARCH_MMU_FLAG_CACHE_MASK) {
        TRACEF("cache attributes should come from vmm_obj, not from caller\n");
        return ERR_INVALID_ARGS;
    }

    *arch_mmu_flags = requested | phys->arch_mmu_flags;

    return 0;
}
121
/*
 * vmm_obj get_page callback: translate an offset within the object into a
 * physical address and the size of the contiguous run from that address to
 * the end of the object.
 *
 * Returns 0 on success or ERR_OUT_OF_RANGE if @offset is not inside the
 * object.
 */
static int phys_mem_obj_get_page(struct vmm_obj* obj,
                                 size_t offset,
                                 paddr_t* paddr,
                                 size_t* paddr_size) {
    struct phys_mem_obj* phys_obj = phys_mem_obj_from_vmm_obj(obj);

    /* %zu, not %zd: offset and size are size_t (unsigned). */
    LTRACEF("offset %zu phys_obj paddr 0x%" PRIxPADDR "\n", offset,
            phys_obj->paddr);

    if (offset >= phys_obj->size) {
        TRACEF("offset %zu out of range size %zu\n", offset,
               phys_obj->size);
        return ERR_OUT_OF_RANGE;
    }

    /* The region is physically contiguous, so the whole tail is mappable. */
    *paddr = phys_obj->paddr + offset;
    *paddr_size = phys_obj->size - offset;
    LTRACEF("offset %zu -> paddr 0x%" PRIxPADDR ", size %zu\n", offset,
            *paddr, *paddr_size);

    return 0;
}
143
phys_mem_obj_destroy(struct vmm_obj * vmm_obj)144 static void phys_mem_obj_destroy(struct vmm_obj* vmm_obj) {
145 struct phys_mem_obj* obj = containerof(vmm_obj,
146 struct phys_mem_obj,
147 vmm_obj);
148
149 DEBUG_ASSERT(obj->destroy_fn);
150 obj->destroy_fn(obj);
151 }
152