/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <kernel/vm.h>
#include "vm_priv.h"

#include <assert.h>
#include <trace.h>
#include <err.h>
#include <string.h>
#include <lk/init.h>
#include <lib/console.h>
#include <arch/mmu.h>
#include <kernel/thread.h>
#include <debug.h>
#include <inttypes.h>

#define LOCAL_TRACE 0

extern int _start;
extern int _end;
extern char __code_start;
extern char __rodata_start;
extern char __data_start;

/* mark the physical pages backing a range of virtual addresses as in use.
 * allocate the physical pages and throw them away */
static void mark_pages_in_use(vaddr_t va, size_t len)
{
    LTRACEF("va 0x%" PRIxVADDR ", len 0x%zx\n", va, len);

    struct list_node list;
    list_initialize(&list);

    /* make sure we are inclusive of all of the pages in the address range */
    len = page_align(len + (va & (PAGE_SIZE - 1)));
    va = round_down(va, PAGE_SIZE);

    LTRACEF("aligned va 0x%" PRIxVADDR ", len 0x%zx\n", va, len);

    for (size_t offset = 0; offset < len; offset += PAGE_SIZE) {
        uint flags;
        paddr_t pa;

        status_t err = arch_mmu_query(&vmm_get_kernel_aspace()->arch_aspace, va + offset, &pa, &flags);
        if (err >= 0) {
            //LTRACEF("va 0x%lx, pa 0x%lx, flags 0x%x, err %d\n", va + offset, pa, flags, err);

            /* allocate the range, throw the results away */
            if (pmm_alloc_range(pa, 1, &list) != 1) {
                panic("Could not alloc pa 0x%" PRIxPADDR "\n", pa);
            }
        } else {
            panic("Could not find pa for va 0x%" PRIxVADDR "\n", va);
        }
    }
}

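/* init hook that runs just before the kernel heap is brought up:
 * let the vmm set up its pre-heap state, then mark the pages backing the
 * kernel image and the boot time allocator as allocated so the pmm
 * will not hand them out again */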
static void vm_init_preheap(uint level)
{
    LTRACE_ENTRY;

    /* allow the vmm a shot at initializing some of its data structures */
    vmm_init_preheap();

    /* mark all of the kernel pages in use */
    LTRACEF("marking all kernel pages as used\n");
    mark_pages_in_use((vaddr_t)&_start, ((uintptr_t)&_end - (uintptr_t)&_start));

    /* the boot time allocator should not be used after this */
    uintptr_t alloc_start = boot_alloc_start;
    uintptr_t alloc_end = boot_alloc_end;
    boot_alloc_start = 0;
    boot_alloc_end = 0;

    /* mark the physical pages used by the boot time allocator */
    if (alloc_end != alloc_start) {
        LTRACEF("marking boot alloc used from 0x%" PRIxPTR " to 0x%" PRIxPTR "\n", alloc_start, alloc_end);

        /*
         * if _end is not page aligned, the kernel and the boot time allocator
         * may share a page. Do not mark this page a second time.
         */
        ASSERT(alloc_start == (uintptr_t)&_end);
        alloc_start = page_align(alloc_start);

        /*
         * aligning start could move it past end. In this case, the data is in a
         * single page and it has already been marked.
         */
        if (alloc_start < alloc_end) {
            mark_pages_in_use(alloc_start, alloc_end - alloc_start);
        }
    }
}

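/* init hook that runs once the heap is available: bring up the vmm proper
 * and reserve kernel address space for the existing initial mappings */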
static void vm_init_postheap(uint level)
{
    LTRACE_ENTRY;

    vmm_init();

    /* create vmm regions to cover what is already there from the initial mapping table */
    struct mmu_initial_mapping *map = mmu_initial_mappings;
    while (map->size > 0) {
        if (!(map->flags & MMU_INITIAL_MAPPING_TEMPORARY)) {
            vmm_reserve_space(vmm_get_kernel_aspace(), map->name, map->size, map->virt);
        }

        map++;
    }
}

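/* patch up any 'dynamic' entries in the initial mapping table with the
 * kernel's physical load address and the ram size passed in by the platform */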
void vm_assign_initial_dynamic(paddr_t kernel_start, size_t ram_size)
{
    for (struct mmu_initial_mapping *m = mmu_initial_mappings; m->size; m++) {
        if (m->flags & MMU_INITIAL_MAPPING_FLAG_DYNAMIC) {
            m->phys = kernel_start;
            m->size = ram_size;
        }
    }
}

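/* walk the initial mapping table and map each entry with arch_mmu_map_early,
 * splitting the cached kernel mapping into pieces so that .text, .rodata and
 * .data get appropriate permissions */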
void vm_map_initial_mappings(void)
{
    for (struct mmu_initial_mapping *m = mmu_initial_mappings; m->size; m++) {
        paddr_t paddr = m->phys;
        vaddr_t vaddr = m->virt;
        size_t mapping_size = m->size;
        for (;;) {
            size_t size = mapping_size;
            uint flags;
            if (m->flags & MMU_INITIAL_MAPPING_FLAG_UNCACHED) {
                flags = ARCH_MMU_FLAG_UNCACHED | ARCH_MMU_FLAG_PERM_NO_EXECUTE;
            } else if (m->flags & MMU_INITIAL_MAPPING_FLAG_DEVICE) {
                flags = ARCH_MMU_FLAG_UNCACHED_DEVICE | ARCH_MMU_FLAG_PERM_NO_EXECUTE;
            } else {
                /* Determine the segment in which the memory resides and set appropriate
                 * attributes. In order to handle offset kernels, the following rules are
                 * implemented below:
                 *     KERNEL_BASE    to __code_start              -read/write (see note below)
                 *     __code_start   to __rodata_start  (.text)   -read only
                 *     __rodata_start to __data_start    (.rodata) -read only, execute never
                 *     __data_start   to .....           (.data)   -read/write
                 *
                 * The space below __code_start is presently left as read/write (same as .data)
                 * mainly as a workaround for the raspberry pi boot process. Boot vectors for
                 * secondary CPUs are in this area and need to be updated by cpu0 once the system
                 * is ready to boot the secondary processors.
                 * TODO: handle this via mmu_initial_mapping entries, which may need to be
                 * extended with additional flag types
                 */
                flags = ARCH_MMU_FLAG_CACHED;
                if (paddr < (paddr_t)&__code_start) {
                    /* If the page is below __code_start, mark it as kernel data */
                    size = (paddr_t)&__code_start - paddr;
                    flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
                } else if (paddr < (paddr_t)&__rodata_start) {
                    size = (paddr_t)&__rodata_start - paddr;
                    flags |= ARCH_MMU_FLAG_PERM_RO;
                } else if (paddr < (paddr_t)&__data_start) {
                    size = (paddr_t)&__data_start - paddr;
                    flags |= ARCH_MMU_FLAG_PERM_RO;
                    flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
                } else {
                    ASSERT(paddr < (paddr_t)&_end);
                    ASSERT(((paddr_t)&_end - paddr) <= mapping_size);
                    flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
                }
            }

            ASSERT(size <= mapping_size);
            arch_mmu_map_early(vaddr, paddr, size, flags);

            mapping_size -= size;
            if (!mapping_size) {
                break;
            }
            paddr += size;
            vaddr += size;
        }
    }
}

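/* return the virtual base and size of the first entry in the initial mapping table */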
void *kvaddr_get_range(size_t *size_return)
{
    *size_return = mmu_initial_mappings->size;
    return (void *)mmu_initial_mappings->virt;
}

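/* translate a physical address to a kernel virtual address by walking the
 * initial mapping table, falling back to the pmm arena lookup */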
void *paddr_to_kvaddr(paddr_t pa)
{
    /* slow path to do reverse lookup */
    struct mmu_initial_mapping *map = mmu_initial_mappings;
    while (map->size > 0) {
        if (!(map->flags & MMU_INITIAL_MAPPING_TEMPORARY) &&
                pa >= map->phys &&
                pa <= map->phys + map->size - 1) {
            return (void *)(map->virt + (pa - map->phys));
        }
        map++;
    }
    return pmm_paddr_to_kvaddr(pa);
}

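/* look up the physical address backing a pointer by querying the mmu in
 * whichever address space the pointer belongs to; returns 0 if the pointer
 * is not in any address space or is unmapped */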
paddr_t vaddr_to_paddr(void *ptr)
{
    vmm_aspace_t *aspace = vaddr_to_aspace(ptr);
    if (!aspace)
        return (paddr_t)NULL;

    paddr_t pa;
    status_t rc = arch_mmu_query(&aspace->arch_aspace, (vaddr_t)ptr, &pa, NULL);
    if (rc)
        return (paddr_t)NULL;

    return pa;
}

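/* return the address space a pointer falls in: the kernel aspace for kernel
 * addresses, the current thread's aspace for user addresses, NULL otherwise */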
vmm_aspace_t *vaddr_to_aspace(void *ptr)
{
    if (is_kernel_address((vaddr_t)ptr)) {
        return vmm_get_kernel_aspace();
    } else if (is_user_address((vaddr_t)ptr)) {
        return get_current_thread()->aspace;
    } else {
        return NULL;
    }
}

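/* console 'vm' command: translate addresses between physical and virtual
 * and create or remove mappings by hand */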
static int cmd_vm(int argc, const cmd_args *argv)
{
    if (argc < 2) {
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage:\n");
        printf("%s phys2virt <address>\n", argv[0].str);
        printf("%s virt2phys <address>\n", argv[0].str);
        printf("%s map <phys> <virt> <count> <flags>\n", argv[0].str);
        printf("%s unmap <virt> <count>\n", argv[0].str);
        return ERR_GENERIC;
    }

    if (!strcmp(argv[1].str, "phys2virt")) {
        if (argc < 3) goto notenoughargs;

        void *ptr = paddr_to_kvaddr((paddr_t)argv[2].u);
        printf("paddr_to_kvaddr returns %p\n", ptr);
    } else if (!strcmp(argv[1].str, "virt2phys")) {
        if (argc < 3) goto notenoughargs;

        vmm_aspace_t *aspace = vaddr_to_aspace((void *)argv[2].u);
        if (!aspace) {
            printf("ERROR: outside of any address space\n");
            return -1;
        }

        paddr_t pa;
        uint flags;
        status_t err = arch_mmu_query(&aspace->arch_aspace, argv[2].u, &pa, &flags);
        printf("arch_mmu_query returns %d\n", err);
        if (err >= 0) {
            printf("\tpa 0x%" PRIxPADDR ", flags 0x%x\n", pa, flags);
        }
    } else if (!strcmp(argv[1].str, "map")) {
        if (argc < 6) goto notenoughargs;

        vmm_aspace_t *aspace = vaddr_to_aspace((void *)argv[2].u);
        if (!aspace) {
            printf("ERROR: outside of any address space\n");
            return -1;
        }

        int err = arch_mmu_map(&aspace->arch_aspace, argv[3].u, argv[2].u, argv[4].u, argv[5].u);
        printf("arch_mmu_map returns %d\n", err);
    } else if (!strcmp(argv[1].str, "unmap")) {
        if (argc < 4) goto notenoughargs;

        vmm_aspace_t *aspace = vaddr_to_aspace((void *)argv[2].u);
        if (!aspace) {
            printf("ERROR: outside of any address space\n");
            return -1;
        }

        int err = arch_mmu_unmap(&aspace->arch_aspace, argv[2].u, argv[3].u);
        printf("arch_mmu_unmap returns %d\n", err);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return NO_ERROR;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("vm", "vm commands", &cmd_vm)
#endif
STATIC_COMMAND_END(vm);

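/* bring the vm up in two stages: just before the heap is initialized and
 * again at the vm init level once the heap is running */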
LK_INIT_HOOK(vm_preheap, &vm_init_preheap, LK_INIT_LEVEL_HEAP - 1);
LK_INIT_HOOK(vm, &vm_init_postheap, LK_INIT_LEVEL_VM);