/*
 * Copyright (c) 2008-2016 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <debug.h>
#include <trace.h>
#include <stdlib.h>
#include <sys/types.h>
#include <err.h>
#include <string.h>
#include <compiler.h>
#include <pow2.h>
#include <arch.h>
#include <arch/ops.h>
#include <arch/mmu.h>
#include <arch/arm.h>
#include <arch/arm/mmu.h>
#include <kernel/vm.h>
#include <lk/init.h>
#include <inttypes.h>

#define LOCAL_TRACE 0
#define TRACE_CONTEXT_SWITCH 0

#if ARM_WITH_MMU

#define IS_SECTION_ALIGNED(x) IS_ALIGNED(x, SECTION_SIZE)
#define IS_SUPERSECTION_ALIGNED(x) IS_ALIGNED(x, SUPERSECTION_SIZE)

/* locals */
static void arm_mmu_map_section(arch_aspace_t *aspace, addr_t paddr, addr_t vaddr, uint flags);
static void arm_mmu_unmap_section(arch_aspace_t *aspace, addr_t vaddr);

/* the main translation table: 4096 32-bit L1 entries (16KB), which the
 * hardware requires to be 16KB aligned when TTBCR.N == 0 */
uint32_t arm_kernel_translation_table[TT_ENTRY_COUNT] __ALIGNED(16384) __SECTION(".bss.prebss.translation_table");

/* convert user level mmu flags to flags that go in L1 descriptors */
static uint32_t mmu_flags_to_l1_arch_flags(uint flags)
{
    uint32_t arch_flags = 0;
    switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
        case ARCH_MMU_FLAG_CACHED:
            arch_flags |= MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
#if WITH_SMP | WITH_SHAREABLE_CACHE
            arch_flags |= MMU_MEMORY_L1_SECTION_SHAREABLE;
#endif
            break;
        case ARCH_MMU_FLAG_UNCACHED:
            arch_flags |= MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED;
            break;
        case ARCH_MMU_FLAG_UNCACHED_DEVICE:
            arch_flags |= MMU_MEMORY_L1_TYPE_DEVICE_SHARED;
            break;
        default:
            /* invalid user-supplied flag */
            DEBUG_ASSERT(0);
            return ERR_INVALID_ARGS;
    }

    switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) {
        case 0:
            arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_NA;
            break;
        case ARCH_MMU_FLAG_PERM_RO:
            arch_flags |= MMU_MEMORY_L1_AP_P_RO_U_NA;
            break;
        case ARCH_MMU_FLAG_PERM_USER:
            arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_RW;
            break;
        case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO:
            arch_flags |= MMU_MEMORY_L1_AP_P_RO_U_RO;
            break;
    }

    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) {
        arch_flags |= MMU_MEMORY_L1_SECTION_XN;
    }

    if (flags & ARCH_MMU_FLAG_NS) {
        arch_flags |= MMU_MEMORY_L1_SECTION_NON_SECURE;
    }

    return arch_flags;
}

/* convert user level mmu flags to flags that go in L2 descriptors */
static uint32_t mmu_flags_to_l2_arch_flags_small_page(uint flags)
{
    uint32_t arch_flags = 0;
    switch (flags & ARCH_MMU_FLAG_CACHE_MASK) {
        case ARCH_MMU_FLAG_CACHED:
            arch_flags |= MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
#if WITH_SMP | WITH_SHAREABLE_CACHE
            arch_flags |= MMU_MEMORY_L2_SHAREABLE;
#endif
            break;
        case ARCH_MMU_FLAG_UNCACHED:
            arch_flags |= MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED;
            break;
        case ARCH_MMU_FLAG_UNCACHED_DEVICE:
            arch_flags |= MMU_MEMORY_L2_TYPE_DEVICE_SHARED;
            break;
        default:
            /* invalid user-supplied flag */
            DEBUG_ASSERT(0);
            return ERR_INVALID_ARGS;
    }

    switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) {
        case 0:
            arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_NA;
            break;
        case ARCH_MMU_FLAG_PERM_RO:
            arch_flags |= MMU_MEMORY_L2_AP_P_RO_U_NA;
            break;
        case ARCH_MMU_FLAG_PERM_USER:
            arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_RW;
            break;
        case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO:
            arch_flags |= MMU_MEMORY_L2_AP_P_RO_U_RO;
            break;
    }

    if (flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) {
        arch_flags |= MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN;
    } else {
        arch_flags |= MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE;
    }

    return arch_flags;
}

static inline bool is_valid_vaddr(arch_aspace_t *aspace, vaddr_t vaddr)
{
    return (vaddr >= aspace->base && vaddr <= aspace->base + (aspace->size - 1));
}

static void arm_mmu_map_section(arch_aspace_t *aspace, addr_t paddr, addr_t vaddr, uint flags)
{
    int index;

    LTRACEF("aspace %p tt %p pa 0x%" PRIxPADDR " va 0x%" PRIxVADDR " flags 0x%x\n", aspace, aspace->tt_virt, paddr, vaddr, flags);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);
    DEBUG_ASSERT(IS_SECTION_ALIGNED(paddr));
    DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));
    DEBUG_ASSERT((flags & MMU_MEMORY_L1_DESCRIPTOR_MASK) == MMU_MEMORY_L1_DESCRIPTOR_SECTION);

    /* Get the index into the translation table */
    index = vaddr / SECTION_SIZE;

    /* Set the entry value:
     * (2<<0): Section entry
     * (0<<5): Domain = 0
     * flags: TEX, CB and AP bit settings provided by the caller.
     */
    aspace->tt_virt[index] = (paddr & ~(MB-1)) | (MMU_MEMORY_DOMAIN_MEM << 5) | MMU_MEMORY_L1_DESCRIPTOR_SECTION | flags;
}

static void arm_mmu_unmap_l1_entry(uint32_t *translation_table, uint32_t index)
{
    DEBUG_ASSERT(translation_table);
    DEBUG_ASSERT(index < TT_ENTRY_COUNT);

    translation_table[index] = 0;
    DSB;
    arm_invalidate_tlb_mva_no_barrier((vaddr_t)index * SECTION_SIZE);
}

static void arm_mmu_unmap_section(arch_aspace_t *aspace, addr_t vaddr)
{
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr));
    arm_mmu_unmap_l1_entry(aspace->tt_virt, vaddr / SECTION_SIZE);
}

void arm_mmu_early_init(void)
{
}

void arm_mmu_init(void)
{
    /* unmap the initial mappings that are marked temporary */
    struct mmu_initial_mapping *map = mmu_initial_mappings;
    while (map->size > 0) {
        if (map->flags & MMU_INITIAL_MAPPING_TEMPORARY) {
            vaddr_t va = map->virt;
            size_t size = map->size;

            DEBUG_ASSERT(IS_SECTION_ALIGNED(size));

            while (size > 0) {
                arm_mmu_unmap_l1_entry(arm_kernel_translation_table, va / SECTION_SIZE);
                va += MB;
                size -= MB;
            }
        }
        map++;
    }
    arm_after_invalidate_tlb_barrier();

#if KERNEL_ASPACE_BASE != 0
    /* bounce the ttbr over to ttbr1 and leave 0 unmapped */
    uint32_t n = __builtin_clz(KERNEL_ASPACE_BASE - 1);
    DEBUG_ASSERT(n <= 7);

    /* KERNEL_ASPACE_BASE has to be a power of 2, 32MB..2GB in size */
    STATIC_ASSERT(KERNEL_ASPACE_BASE >= 32*MB);
    STATIC_ASSERT(KERNEL_ASPACE_BASE <= 2*GB);
    STATIC_ASSERT(((KERNEL_ASPACE_BASE - 1) & KERNEL_ASPACE_BASE) == 0);

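    /* n becomes the TTBCR.N split: e.g. with KERNEL_ASPACE_BASE = 0x80000000,
     * n = 1, so TTBR0 covers only the lower 2GB and TTBR1 maps the kernel half */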
    uint32_t ttbcr = (1<<4) | n; /* disable table walks via TTBR0 (PD0) and set the split between TTBR0 and TTBR1 */

    arm_write_ttbr1(arm_read_ttbr0());
    ISB;
    arm_write_ttbcr(ttbcr);
    ISB;
    arm_write_ttbr0(0);
    ISB;
    arm_invalidate_tlb_global();
#endif
}

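/* replicate the primary cpu's TTBR0/TTBR1 split on secondary cpus as they come up */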
static void arm_secondary_mmu_init(uint level)
{
    uint32_t n = __builtin_clz(KERNEL_ASPACE_BASE - 1);
    uint32_t cur_ttbr0;

    cur_ttbr0 = arm_read_ttbr0();

    /* push out kernel mappings to ttbr1 */
    arm_write_ttbr1(cur_ttbr0);

    /* setup a user-kernel split */
    arm_write_ttbcr(n);

    arm_invalidate_tlb_global();
}

LK_INIT_HOOK_FLAGS(archarmmmu, arm_secondary_mmu_init,
                   LK_INIT_LEVEL_ARCH_EARLY, LK_INIT_FLAG_SECONDARY_CPUS);

void arch_disable_mmu(void)
{
    arm_write_sctlr(arm_read_sctlr() & ~(1<<0)); // clear SCTLR.M (bit 0) to disable the mmu
}

void arch_mmu_context_switch(arch_aspace_t *aspace)
{
    if (LOCAL_TRACE && TRACE_CONTEXT_SWITCH)
        LTRACEF("aspace %p\n", aspace);

    uint32_t ttbr;
    uint32_t ttbcr = arm_read_ttbcr();
    if (aspace) {
        ttbr = MMU_TTBRx_FLAGS | (aspace->tt_phys);
        ttbcr &= ~(1U<<4); // enable TTBR0
    } else {
        ttbr = 0;
        ttbcr |= (1U<<4); // disable TTBR0
    }

    if (LOCAL_TRACE && TRACE_CONTEXT_SWITCH)
        LTRACEF("ttbr 0x%x, ttbcr 0x%x\n", ttbr, ttbcr);
    arm_write_ttbr0(ttbr);
    arm_write_ttbcr(ttbcr);
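    /* no ASIDs are in use here, so the entire TLB has to be invalidated on every switch */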
    arm_invalidate_tlb_global();
}

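/* walk the L1 (and, if present, L2) tables to recover the physical address and
 * mapping flags backing a virtual address */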
status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags)
{
    LTRACEF("aspace %p, vaddr 0x%" PRIxVADDR "\n", aspace, vaddr);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    /* Get the index into the translation table */
    uint index = vaddr / MB;

    /* decode it */
    uint32_t tt_entry = aspace->tt_virt[index];
    switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
        case MMU_MEMORY_L1_DESCRIPTOR_INVALID:
            return ERR_NOT_FOUND;
        case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
            if (tt_entry & (1<<18)) {
                /* supersection */
                PANIC_UNIMPLEMENTED;
            }

            /* section */
            if (paddr)
                *paddr = MMU_MEMORY_L1_SECTION_ADDR(tt_entry) + (vaddr & (SECTION_SIZE - 1));

            if (flags) {
                *flags = 0;
                if (tt_entry & MMU_MEMORY_L1_SECTION_NON_SECURE)
                    *flags |= ARCH_MMU_FLAG_NS;
                switch (tt_entry & MMU_MEMORY_L1_TYPE_MASK) {
                    case MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED:
                        *flags |= ARCH_MMU_FLAG_UNCACHED;
                        break;
                    case MMU_MEMORY_L1_TYPE_DEVICE_SHARED:
                    case MMU_MEMORY_L1_TYPE_DEVICE_NON_SHARED:
                        *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
                        break;
                }
                switch (tt_entry & MMU_MEMORY_L1_AP_MASK) {
                    case MMU_MEMORY_L1_AP_P_RO_U_NA:
                        *flags |= ARCH_MMU_FLAG_PERM_RO;
                        break;
                    case MMU_MEMORY_L1_AP_P_RW_U_NA:
                        break;
                    case MMU_MEMORY_L1_AP_P_RO_U_RO:
                        *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
                        break;
                    case MMU_MEMORY_L1_AP_P_RW_U_RW:
                        *flags |= ARCH_MMU_FLAG_PERM_USER;
                        break;
                }
                if (tt_entry & MMU_MEMORY_L1_SECTION_XN) {
                    *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
                }
            }
            break;
        case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
            uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
            uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
            uint32_t l2_entry = l2_table[l2_index];

            //LTRACEF("l2_table at %p, index %u, entry 0x%x\n", l2_table, l2_index, l2_entry);

            switch (l2_entry & MMU_MEMORY_L2_DESCRIPTOR_MASK) {
                default:
                case MMU_MEMORY_L2_DESCRIPTOR_INVALID:
                    return ERR_NOT_FOUND;
                case MMU_MEMORY_L2_DESCRIPTOR_LARGE_PAGE:
                    PANIC_UNIMPLEMENTED;
                    break;
                case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE:
                case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN:
                    if (paddr)
                        *paddr = MMU_MEMORY_L2_SMALL_PAGE_ADDR(l2_entry) + (vaddr & (PAGE_SIZE - 1));

                    if (flags) {
                        *flags = 0;
                        /* NS flag is only present on L1 entry */
                        if (tt_entry & MMU_MEMORY_L1_PAGETABLE_NON_SECURE)
                            *flags |= ARCH_MMU_FLAG_NS;
                        switch (l2_entry & MMU_MEMORY_L2_TYPE_MASK) {
                            case MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED:
                                *flags |= ARCH_MMU_FLAG_UNCACHED;
                                break;
                            case MMU_MEMORY_L2_TYPE_DEVICE_SHARED:
                            case MMU_MEMORY_L2_TYPE_DEVICE_NON_SHARED:
                                *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
                                break;
                        }
                        switch (l2_entry & MMU_MEMORY_L2_AP_MASK) {
                            case MMU_MEMORY_L2_AP_P_RO_U_NA:
                                *flags |= ARCH_MMU_FLAG_PERM_RO;
                                break;
                            case MMU_MEMORY_L2_AP_P_RW_U_NA:
                                break;
                            case MMU_MEMORY_L2_AP_P_RO_U_RO:
                                *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO;
                                break;
                            case MMU_MEMORY_L2_AP_P_RW_U_RW:
                                *flags |= ARCH_MMU_FLAG_PERM_USER;
                                break;
                        }
                        if ((l2_entry & MMU_MEMORY_L2_DESCRIPTOR_MASK) ==
                                MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN) {
                            *flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
                        }
                    }
                    break;
            }

            break;
        }
        default:
            PANIC_UNIMPLEMENTED;
    }

    return NO_ERROR;
}


/*
 * We allow up to 4 adjacent L1 entries to point within the same memory page
 * allocated for L2 page tables.
 *
 * L1:  | 0 | 1 | 2 | 3 | .... | N+0 | N+1 | N+2 | N+3 |
 * L2:  [    page 0    ] ....  [    page N/4    ]
 */
#define L1E_PER_PAGE 4
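/* an ARM short-descriptor L2 table is 256 entries * 4 bytes = 1KB, so four of
 * them fit in a single 4KB page */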

static status_t get_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t *ppa)
{
    status_t ret;
    paddr_t pa;
    uint32_t tt_entry;

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(ppa);

    /* lookup an existing l2 pagetable */
    for (uint i = 0; i < L1E_PER_PAGE; i++) {
        tt_entry = aspace->tt_virt[round_down(l1_index, L1E_PER_PAGE) + i];
        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK)
                == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
            *ppa = (paddr_t)round_down(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry), PAGE_SIZE)
                   + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));
            return NO_ERROR;
        }
    }

    /* not found: allocate it */
    uint32_t *l2_va = pmm_alloc_kpages(1, &aspace->pt_page_list);
    if (!l2_va)
        return ERR_NO_MEMORY;

    /* wipe it clean to set no access */
    memset(l2_va, 0, PAGE_SIZE);

    /* get physical address */
    ret = arm_vtop((vaddr_t)l2_va, &pa);
    ASSERT(!ret);
    ASSERT(paddr_to_kvaddr(pa));

    DEBUG_ASSERT(IS_PAGE_ALIGNED((vaddr_t)l2_va));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));

    *ppa = pa + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));

    LTRACEF("allocated pagetable at %p, pa 0x%" PRIxPADDR ", ppa 0x%" PRIxPADDR "\n", l2_va, pa, *ppa);
    return NO_ERROR;
}


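/* drop a reference to a shared l2 page: the backing page is only freed once no
 * L1 entry in its group of four still points into it */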
static void put_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t l2_pa)
{
    DEBUG_ASSERT(aspace);

    /* check if any l1 entry points to this l2 table */
    for (uint i = 0; i < L1E_PER_PAGE; i++) {
        uint32_t tt_entry = aspace->tt_virt[round_down(l1_index, L1E_PER_PAGE) + i];
        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK)
                == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
            return;
        }
    }

    /* we can free this l2 table */
    vm_page_t *page = paddr_to_vm_page(l2_pa);
    if (!page)
        panic("bad page table paddr 0x%" PRIxPADDR "\n", l2_pa);

    /* verify that it is in our page list */
    DEBUG_ASSERT(list_in_list(&page->node));

    list_delete(&page->node);

    LTRACEF("freeing pagetable at 0x%" PRIxPADDR "\n", l2_pa);
    pmm_free_page(page);
}

#if WITH_ARCH_MMU_PICK_SPOT

static inline bool are_regions_compatible(uint new_region_flags,
                                          uint adjacent_region_flags)
{
    /*
     * Two regions are compatible if NS flag matches.
     */
    uint mask = ARCH_MMU_FLAG_NS;

    if ((new_region_flags & mask) == (adjacent_region_flags & mask))
        return true;

    return false;
}

/* Performs explicit wrapping checks, so allow overflow */
__attribute__((no_sanitize("unsigned-integer-overflow")))
vaddr_t arch_mmu_pick_spot(arch_aspace_t *aspace,
                           vaddr_t base, uint prev_region_flags,
                           vaddr_t end, uint next_region_flags,
                           vaddr_t alignment, size_t size, uint flags)
{
    LTRACEF("base 0x%" PRIxVADDR ", end 0x%" PRIxVADDR ", align %" PRIdVADDR ", size %zd, flags 0x%x\n",
            base, end, alignment, size, flags);

    vaddr_t spot;

    if (alignment >= SECTION_SIZE ||
            are_regions_compatible(flags, prev_region_flags)) {
        spot = align(base, alignment);
    } else {
        spot = align(base, SECTION_SIZE);
    }

    vaddr_t spot_end = spot + size - 1;
    if (spot_end < spot || spot_end > end)
        return end; /* wrapped around or it does not fit */

    if ((spot_end / SECTION_SIZE) == (end / SECTION_SIZE)) {
        if (!are_regions_compatible(flags, next_region_flags))
            return end;
    }

    return spot;
}
#endif /* WITH_ARCH_MMU_PICK_SPOT */


int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, size_t count, uint flags)
{
    LTRACEF("vaddr 0x%" PRIxVADDR " paddr 0x%" PRIxPADDR " count %zu flags 0x%x\n", vaddr, paddr, count, flags);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

#if !WITH_ARCH_MMU_PICK_SPOT
    if (flags & ARCH_MMU_FLAG_NS) {
        /* WITH_ARCH_MMU_PICK_SPOT is required to support NS memory */
        panic("NS mem is not supported\n");
    }
#endif

    /* paddr and vaddr must be aligned */
    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
        return ERR_INVALID_ARGS;

    if (count == 0)
        return NO_ERROR;

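    /* note: count and mapped below are in units of PAGE_SIZE pages, not bytes */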
    /* see what kind of mapping we can use */
    uint mapped = 0;
    while (count > 0) {
        if (IS_SECTION_ALIGNED(vaddr) && IS_SECTION_ALIGNED(paddr) && count >= SECTION_SIZE / PAGE_SIZE) {
            /* we can use a section */

            /* compute the arch flags for L1 sections */
            uint arch_flags = mmu_flags_to_l1_arch_flags(flags) |
                              MMU_MEMORY_L1_DESCRIPTOR_SECTION;

            /* map it */
            arm_mmu_map_section(aspace, paddr, vaddr, arch_flags);
            count -= SECTION_SIZE / PAGE_SIZE;
            mapped += SECTION_SIZE / PAGE_SIZE;
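            /* the addresses may legitimately wrap to 0 only if this was the
             * last chunk of the request, i.e. the mapping ends exactly at the
             * top of the address space */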
            if (__builtin_add_overflow(vaddr, SECTION_SIZE, &vaddr)) {
                ASSERT(!count);
            }
            if (__builtin_add_overflow(paddr, SECTION_SIZE, &paddr)) {
                ASSERT(!count);
            }
        } else {
            /* will have to use a L2 mapping */
            uint l1_index = vaddr / SECTION_SIZE;
            uint32_t tt_entry = aspace->tt_virt[l1_index];

            LTRACEF("tt_entry 0x%x\n", tt_entry);
            switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
                case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                    // XXX will have to break L1 mapping into a L2 page table
                    PANIC_UNIMPLEMENTED;
                    break;
                case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                    paddr_t l2_pa = 0;
                    if (get_l2_table(aspace, l1_index, &l2_pa) != NO_ERROR) {
                        TRACEF("failed to allocate pagetable\n");
                        goto done;
                    }
                    tt_entry = l2_pa | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE;
                    if (flags & ARCH_MMU_FLAG_NS)
                        tt_entry |= MMU_MEMORY_L1_PAGETABLE_NON_SECURE;

                    aspace->tt_virt[l1_index] = tt_entry;
                    __FALLTHROUGH;
                }
                case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                    uint flag_secure = !(flags & ARCH_MMU_FLAG_NS);
                    uint entry_secure = !(tt_entry & MMU_MEMORY_L1_PAGETABLE_NON_SECURE);
                    if (flag_secure != entry_secure) {
                        TRACEF("attempted to allocate secure and non-secure "
                               "pages in the same l2 pagetable\n");
                        goto done;
                    }

                    uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                    LTRACEF("l2_table at %p\n", l2_table);

                    DEBUG_ASSERT(l2_table);

                    // XXX handle 64K pages here

                    /* compute the arch flags for L2 4K pages */
                    uint arch_flags = mmu_flags_to_l2_arch_flags_small_page(flags);

                    uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                    do {
                        l2_table[l2_index++] = paddr | arch_flags;
                        count--;
                        mapped++;
                        if (__builtin_add_overflow(vaddr, PAGE_SIZE, &vaddr)) {
                            ASSERT(!count);
                        }
                        if (__builtin_add_overflow(paddr, PAGE_SIZE, &paddr)) {
                            ASSERT(!count);
                        }
                    } while (count && (l2_index != (SECTION_SIZE / PAGE_SIZE)));
                    break;
                }
                default:
                    PANIC_UNIMPLEMENTED;
            }
        }
    }

done:
    DSB;
    if (!count) {
        return 0;
    }
    arch_mmu_unmap(aspace, vaddr - mapped * PAGE_SIZE, mapped);
    return ERR_NO_MEMORY;
}

int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, size_t count)
{
    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));

    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    if (!IS_PAGE_ALIGNED(vaddr))
        return ERR_INVALID_ARGS;

    LTRACEF("vaddr 0x%" PRIxVADDR " count %zu\n", vaddr, count);

    int unmapped = 0;
    while (count > 0) {
        uint l1_index = vaddr / SECTION_SIZE;
        uint32_t tt_entry = aspace->tt_virt[l1_index];

        switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
            case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                /* this top level page is not mapped, move on to the next one */
                uint page_cnt = MIN((SECTION_SIZE - (vaddr % SECTION_SIZE)) / PAGE_SIZE, count);
                count -= page_cnt;
                if (__builtin_add_overflow(vaddr, page_cnt * PAGE_SIZE,
                                           &vaddr)) {
                    ASSERT(!count);
                }
                break;
            }
            case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                if (IS_SECTION_ALIGNED(vaddr) && count >= SECTION_SIZE / PAGE_SIZE) {
                    /* we're asked to remove at least all of this section, so just zero it out */
                    // XXX test for supersection
                    arm_mmu_unmap_section(aspace, vaddr);

                    count -= SECTION_SIZE / PAGE_SIZE;
                    unmapped += SECTION_SIZE / PAGE_SIZE;
                    if (__builtin_add_overflow(vaddr, SECTION_SIZE, &vaddr)) {
                        ASSERT(!count);
                    }
                } else {
                    // XXX handle unmapping just part of a section
                    // will need to convert to a L2 table and then unmap the parts we are asked to
                    PANIC_UNIMPLEMENTED;
                }
                break;
            case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                uint page_idx = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                uint page_cnt = MIN((SECTION_SIZE / PAGE_SIZE) - page_idx, count);

                /* unmap page run */
                for (uint i = 0; i < page_cnt; i++) {
                    l2_table[page_idx++] = 0;
                }
                DSB;

                /* invalidate tlb */
                for (uint i = 0; i < page_cnt; i++) {
                    arm_invalidate_tlb_mva_no_barrier(vaddr);
                    if (__builtin_add_overflow(vaddr, PAGE_SIZE, &vaddr)) {
                        ASSERT(i == page_cnt - 1);
                        ASSERT(count - page_cnt == 0);
                    }
                }
                count -= page_cnt;
                unmapped += page_cnt;

                /*
                 * Check whether all pages belonging to this l1 entry have now
                 * been unmapped. We only need to check the entries we did not
                 * clear above, starting from page_idx and wrapping around the
                 * end of the section.
                 */
                page_cnt = (SECTION_SIZE / PAGE_SIZE) - page_cnt;
                while (page_cnt) {
                    if (page_idx == (SECTION_SIZE / PAGE_SIZE))
                        page_idx = 0;
                    if (l2_table[page_idx++])
                        break;
                    page_cnt--;
                }
                if (!page_cnt) {
                    /* we can kill l1 entry */
                    arm_mmu_unmap_l1_entry(aspace->tt_virt, l1_index);

                    /* try to free l2 page itself */
                    put_l2_table(aspace, l1_index, MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                }
                break;
            }

            default:
                // XXX not implemented supersections or L2 tables
                PANIC_UNIMPLEMENTED;
        }
    }
    arm_after_invalidate_tlb_barrier();
    return unmapped;
}

status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags)
{
    LTRACEF("aspace %p, base 0x%" PRIxVADDR ", size 0x%zx, flags 0x%x\n", aspace, base, size, flags);

    DEBUG_ASSERT(aspace);

    /* validate that the base + size is sane and doesn't wrap */
    DEBUG_ASSERT(size > PAGE_SIZE);
    DEBUG_ASSERT(base + (size - 1) > base);

    list_initialize(&aspace->pt_page_list);

    if (flags & ARCH_ASPACE_FLAG_KERNEL) {
        aspace->base = base;
        aspace->size = size;
        aspace->tt_virt = arm_kernel_translation_table;
        aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
    } else {
        uint32_t *va = NULL;
        uint32_t tt_sz = 1 << (14 - __builtin_clz(KERNEL_ASPACE_BASE - 1));
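        /* a user translation table only needs to cover addresses below
         * KERNEL_ASPACE_BASE, e.g. 8KB (2048 entries) for a 2GB user half
         * instead of the full 16KB */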

        DEBUG_ASSERT(base + size - 1 < KERNEL_ASPACE_BASE);

        aspace->base = base;
        aspace->size = size;

        if (tt_sz < PAGE_SIZE) {
            va = memalign(tt_sz, tt_sz);
        } else {
            paddr_t pa;
            if (pmm_alloc_contiguous(tt_sz / PAGE_SIZE, __builtin_ctz(tt_sz),
                                     &pa, &aspace->pt_page_list)) {
                va = paddr_to_kvaddr(pa);
            }
        }
        if (!va)
            return ERR_NO_MEMORY;

        aspace->tt_virt = va;
        aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);

        /* zero the top level translation table */
        memset(aspace->tt_virt, 0, tt_sz);
    }

    LTRACEF("tt_phys 0x%" PRIxPADDR " tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);

    return NO_ERROR;
}

status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace)
{
    uint32_t tt_sz = 1 << (14 - __builtin_clz(KERNEL_ASPACE_BASE - 1));

    LTRACEF("aspace %p\n", aspace);

    if (aspace->tt_virt != arm_kernel_translation_table) {
        /* Assume that this is user address space */
        if (tt_sz < PAGE_SIZE)
            free(aspace->tt_virt);
    }
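    /* larger tables came from pmm_alloc_contiguous and sit on pt_page_list,
     * so they are released by pmm_free() below */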

    /* free all of the pages allocated in aspace->pt_page_list */
    pmm_free(&aspace->pt_page_list);
    return NO_ERROR;
}

#endif // ARM_WITH_MMU
