/*
 * Copyright (c) 2014-2016 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#pragma once

#include <arch.h>
#include <sys/types.h>
#include <compiler.h>

/* to bring in definition of arch_aspace */
#include <arch/aspace.h>

__BEGIN_CDECLS

#define ARCH_MMU_FLAG_CACHED            (0U<<0)
#define ARCH_MMU_FLAG_UNCACHED          (1U<<0)
#define ARCH_MMU_FLAG_UNCACHED_DEVICE   (2U<<0) /* only exists on some arches, otherwise UNCACHED */
#define ARCH_MMU_FLAG_CACHE_MASK        (3U<<0)

#define ARCH_MMU_FLAG_PERM_USER         (1U<<2)
#define ARCH_MMU_FLAG_PERM_RO           (1U<<3)
#define ARCH_MMU_FLAG_PERM_NO_EXECUTE   (1U<<4)
#define ARCH_MMU_FLAG_NS                (1U<<5) /* NON-SECURE */
#define ARCH_MMU_FLAG_TAGGED            (1U<<6)
#define ARCH_MMU_FLAG_INVALID           (1U<<7) /* indicates that flags are not specified */

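/*
 * Illustrative sketch (not part of the API above): the cache type lives in
 * the low two bits (ARCH_MMU_FLAG_CACHE_MASK) and is OR'd together with the
 * permission bits, e.g. for a read-only, non-executable device mapping:
 *
 *   uint flags = ARCH_MMU_FLAG_UNCACHED_DEVICE |
 *                ARCH_MMU_FLAG_PERM_RO |
 *                ARCH_MMU_FLAG_PERM_NO_EXECUTE;
 */
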
/* forward declare the per-address space arch-specific context object */
typedef struct arch_aspace arch_aspace_t;

#define ARCH_ASPACE_FLAG_KERNEL         (1U<<0)
#define ARCH_ASPACE_FLAG_BTI            (1U<<1)

#define ARCH_ASPACE_FLAG_ALL            (ARCH_ASPACE_FLAG_KERNEL | ARCH_ASPACE_FLAG_BTI)

/* initialize and destroy per address space state */
status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) __NONNULL((1));
status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) __NONNULL((1));
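
/*
 * Illustrative sketch (not part of the API above): a typical lifecycle of a
 * user address space. USER_ASPACE_BASE and USER_ASPACE_SIZE are hypothetical
 * placeholders for whatever range the kernel's VM layer hands out.
 *
 *   arch_aspace_t as;
 *   status_t err = arch_mmu_init_aspace(&as, USER_ASPACE_BASE,
 *                                       USER_ASPACE_SIZE, 0);
 *   if (err < 0)
 *       return err;
 *   ...
 *   arch_mmu_destroy_aspace(&as);
 */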

/* routines to map/unmap/query mappings per address space */
int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, size_t count, uint flags) __NONNULL((1));
int arch_mmu_map_replace(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, size_t count, uint flags) __NONNULL((1));
int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, size_t count) __NONNULL((1));
status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags) __NONNULL((1));
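
/*
 * Illustrative sketch (not part of the API above): mapping a physically
 * contiguous range of pages cached and non-executable, querying one address
 * back, then unmapping. The addresses and page count are hypothetical, and
 * the error handling assumes LK's convention of negative error codes.
 *
 *   int ret = arch_mmu_map(&as, vaddr, paddr, page_count,
 *                          ARCH_MMU_FLAG_CACHED | ARCH_MMU_FLAG_PERM_NO_EXECUTE);
 *   if (ret < 0)
 *       return ret;
 *
 *   paddr_t pa;
 *   uint flags;
 *   status_t err = arch_mmu_query(&as, vaddr, &pa, &flags);
 *   // on success, pa and flags describe the mapping backing vaddr
 *
 *   arch_mmu_unmap(&as, vaddr, page_count);
 */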

/* ask the arch code to pick a virtual address for a new region of the given
 * size, alignment and mmu flags within [base, end), taking the flags of the
 * neighboring regions into account */
vaddr_t arch_mmu_pick_spot(arch_aspace_t *aspace,
                           vaddr_t base, uint prev_region_arch_mmu_flags,
                           vaddr_t end,  uint next_region_arch_mmu_flags,
                           vaddr_t align, size_t size, uint arch_mmu_flags) __NONNULL((1));

/* load a new user address space context.
 * Passing NULL for aspace unloads the current user address space.
 */
void arch_mmu_context_switch(arch_aspace_t *aspace);
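
/*
 * Illustrative sketch (not part of the API above): this would typically be
 * called on a context switch into a thread in a different address space, and
 * with NULL when switching to a kernel-only thread. user_as is a hypothetical
 * arch_aspace_t set up elsewhere.
 *
 *   arch_mmu_context_switch(&user_as);
 *   ...
 *   arch_mmu_context_switch(NULL);
 */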

void arch_disable_mmu(void);

/**
 * arch_mmu_map_early() - Map physical pages at virtual address during early
 *                        boot.
 * @vaddr: Virtual address to map the pages at.
 * @paddr: Physical address of pages to map.
 * @count: Number of pages to map.
 * @flags: Mapping flags, a combination of %ARCH_MMU_FLAG_* flags.
 *
 * This maps @count consecutive pages that start at physical address @paddr
 * in the kernel address space starting at @vaddr. This function is safe to call
 * during early boot and internally uses boot_alloc_memalign() to allocate
 * pages for the page tables.
 */
void arch_mmu_map_early(vaddr_t vaddr, paddr_t paddr, size_t count, uint flags);
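
/*
 * Illustrative sketch (not part of the API above): early platform init code
 * might use this to map a UART before the heap and VM are up. UART_PHYS and
 * EARLY_UART_VADDR are hypothetical platform constants.
 *
 *   arch_mmu_map_early(EARLY_UART_VADDR, UART_PHYS, 1,
 *                      ARCH_MMU_FLAG_UNCACHED_DEVICE);
 */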

__END_CDECLS