/*
 * Copyright (c) 2008-2014 Travis Geiselbrecht
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef __ARCH_ARM_MMU_H
#define __ARCH_ARM_MMU_H

#define KB                (1024UL)
#define MB                (1024UL*1024UL)
#define GB                (1024UL*1024UL*1024UL)

#define SECTION_SIZE      MB
#define SUPERSECTION_SIZE (16 * MB)
#define TT_ENTRY_COUNT    (4096)
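
/*
 * Note: with 4096 word-sized first-level entries, one per 1MB of the 4GB
 * virtual address space, the L1 translation table occupies 16KB
 * (TT_ENTRY_COUNT * 4 bytes) and must be 16KB aligned.
 */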

#if defined(ARM_ISA_ARMV6) || defined(ARM_ISA_ARMV7)

#define MMU_MEMORY_L1_DESCRIPTOR_INVALID                 (0x0 << 0)
#define MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE              (0x1 << 0)
#define MMU_MEMORY_L1_DESCRIPTOR_SECTION                 (0x2 << 0)
#define MMU_MEMORY_L1_DESCRIPTOR_SUPERSECTION            ((0x2 << 0) | (0x1 << 18))
#define MMU_MEMORY_L1_DESCRIPTOR_MASK                    (0x3 << 0)

#define MMU_MEMORY_L2_DESCRIPTOR_INVALID                 (0x0 << 0)
#define MMU_MEMORY_L2_DESCRIPTOR_LARGE_PAGE              (0x1 << 0)
#define MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE              (0x2 << 0)
#define MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN           (0x3 << 0)
#define MMU_MEMORY_L2_DESCRIPTOR_MASK                    (0x3 << 0)
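
/*
 * Example (illustrative sketch, not part of this header's API): a fetched
 * first-level entry can be classified by masking off its type bits. The
 * local names (tt, va) are hypothetical.
 *
 *   uint32_t entry = tt[va >> 20];
 *   switch (entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
 *       case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
 *           // 1MB section (bit 18 further distinguishes a supersection)
 *           break;
 *       case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE:
 *           // pointer to a second-level table
 *           break;
 *       default:
 *           // invalid/reserved: generates a translation fault
 *           break;
 *   }
 */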

/* C, B and TEX[2:0] encodings without TEX remap (for first level descriptors) */
/* TEX      |    CB    */
#define MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED              ((0x0 << 12) | (0x0 << 2))
#define MMU_MEMORY_L1_TYPE_DEVICE_SHARED                 ((0x0 << 12) | (0x1 << 2))
#define MMU_MEMORY_L1_TYPE_DEVICE_NON_SHARED             ((0x2 << 12) | (0x0 << 2))
#define MMU_MEMORY_L1_TYPE_NORMAL                        ((0x1 << 12) | (0x0 << 2))
#define MMU_MEMORY_L1_TYPE_NORMAL_WRITE_THROUGH          ((0x0 << 12) | (0x2 << 2))
#define MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_NO_ALLOCATE ((0x0 << 12) | (0x3 << 2))
#define MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE    ((0x1 << 12) | (0x3 << 2))
#define MMU_MEMORY_L1_TYPE_MASK                          ((0x7 << 12) | (0x3 << 2))

/* TEX[2]=1: outer policy in TEX[1:0] (here non-cacheable), inner policy in CB */
#define MMU_MEMORY_L1_TYPE_INNER_WRITE_BACK_ALLOCATE     ((0x4 << 12) | (0x1 << 2))

/* C, B and TEX[2:0] encodings without TEX remap (for second level descriptors) */
/* TEX     |    CB    */
#define MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED              ((0x0 << 6) | (0x0 << 2))
#define MMU_MEMORY_L2_TYPE_DEVICE_SHARED                 ((0x0 << 6) | (0x1 << 2))
#define MMU_MEMORY_L2_TYPE_DEVICE_NON_SHARED             ((0x2 << 6) | (0x0 << 2))
#define MMU_MEMORY_L2_TYPE_NORMAL                        ((0x1 << 6) | (0x0 << 2))
#define MMU_MEMORY_L2_TYPE_NORMAL_WRITE_THROUGH          ((0x0 << 6) | (0x2 << 2))
#define MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_NO_ALLOCATE ((0x0 << 6) | (0x3 << 2))
#define MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE    ((0x1 << 6) | (0x3 << 2))
#define MMU_MEMORY_L2_TYPE_MASK                          ((0x7 << 6) | (0x3 << 2))

#define MMU_MEMORY_DOMAIN_MEM                            (0)

/*
 * AP (Access Permissions)
 * +-------------------------+
 * | AP        P         U   |
 * +-------------------------+
 * | 000      NA        NA   |
 * | 001      RW        NA   |
 * | 010      RW        R    |
 * | 011      RW        RW   |
 * | 101      R         NA   |
 * | 111      R         R    |
 * +-------------------------+
 *
 * NA = No Access
 * RW = Read/Write
 * R  = Read only
 *
 * P = Privileged modes
 * U = Unprivileged (user) modes
 *
 * AP is split across the descriptor: in sections AP[1:0] occupies bits
 * 11:10 and AP[2] bit 15; in second-level entries AP[1:0] occupies bits
 * 5:4 and AP[2] bit 9.
 */
#define MMU_MEMORY_L1_AP_P_NA_U_NA          ((0x0 << 15) | (0x0 << 10))
#define MMU_MEMORY_L1_AP_P_RW_U_RO          ((0x0 << 15) | (0x2 << 10)) /* Obsolete */
#define MMU_MEMORY_L1_AP_P_RW_U_RW          ((0x0 << 15) | (0x3 << 10))
#define MMU_MEMORY_L1_AP_P_RW_U_NA          ((0x0 << 15) | (0x1 << 10))
#define MMU_MEMORY_L1_AP_P_RO_U_RO          ((0x1 << 15) | (0x3 << 10))
#define MMU_MEMORY_L1_AP_P_RO_U_NA          ((0x1 << 15) | (0x1 << 10))
#define MMU_MEMORY_L1_AP_MASK               ((0x1 << 15) | (0x3 << 10))

#define MMU_MEMORY_L2_AP_P_NA_U_NA          ((0x0 << 9) | (0x0 << 4))
#define MMU_MEMORY_L2_AP_P_RW_U_RO          ((0x0 << 9) | (0x2 << 4)) /* Obsolete */
#define MMU_MEMORY_L2_AP_P_RW_U_RW          ((0x0 << 9) | (0x3 << 4))
#define MMU_MEMORY_L2_AP_P_RW_U_NA          ((0x0 << 9) | (0x1 << 4))
#define MMU_MEMORY_L2_AP_P_RO_U_RO          ((0x1 << 9) | (0x3 << 4))
#define MMU_MEMORY_L2_AP_P_RO_U_NA          ((0x1 << 9) | (0x1 << 4))
#define MMU_MEMORY_L2_AP_MASK               ((0x1 << 9) | (0x3 << 4))
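
/*
 * Example (illustrative sketch): the AP macros combine with the descriptor
 * and memory-type bits above. A read-only (in both privileged and user
 * modes) normal-memory section descriptor could be composed as:
 *
 *   uint32_t pte = MMU_MEMORY_L1_DESCRIPTOR_SECTION |
 *                  MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE |
 *                  MMU_MEMORY_L1_AP_P_RO_U_RO;
 */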

#define MMU_MEMORY_L1_PAGETABLE_NON_SECURE  (1 << 3)

#define MMU_MEMORY_L1_SECTION_NON_SECURE    (1 << 19)
#define MMU_MEMORY_L1_SECTION_SHAREABLE     (1 << 16)
#define MMU_MEMORY_L1_SECTION_NON_GLOBAL    (1 << 17)
#define MMU_MEMORY_L1_SECTION_XN            (1 << 4)

#define MMU_MEMORY_L1_CB_SHIFT              2
#define MMU_MEMORY_L1_TEX_SHIFT             12

#define MMU_MEMORY_SET_L1_INNER(val)        (((val) & 0x3) << MMU_MEMORY_L1_CB_SHIFT)
#define MMU_MEMORY_SET_L1_OUTER(val)        (((val) & 0x3) << MMU_MEMORY_L1_TEX_SHIFT)
#define MMU_MEMORY_SET_L1_CACHEABLE_MEM     (0x4 << MMU_MEMORY_L1_TEX_SHIFT)

#define MMU_MEMORY_L2_SHAREABLE             (1 << 10)
#define MMU_MEMORY_L2_NON_GLOBAL            (1 << 11)

#define MMU_MEMORY_L2_CB_SHIFT              2
#define MMU_MEMORY_L2_TEX_SHIFT             6

#define MMU_MEMORY_NON_CACHEABLE             0
#define MMU_MEMORY_WRITE_BACK_ALLOCATE       1
#define MMU_MEMORY_WRITE_THROUGH_NO_ALLOCATE 2
#define MMU_MEMORY_WRITE_BACK_NO_ALLOCATE    3

#define MMU_MEMORY_SET_L2_INNER(val)        (((val) & 0x3) << MMU_MEMORY_L2_CB_SHIFT)
#define MMU_MEMORY_SET_L2_OUTER(val)        (((val) & 0x3) << MMU_MEMORY_L2_TEX_SHIFT)
#define MMU_MEMORY_SET_L2_CACHEABLE_MEM     (0x4 << MMU_MEMORY_L2_TEX_SHIFT)
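
/*
 * Example (illustrative sketch): with TEX remap disabled, TEX[2]=1 selects
 * the "cacheable memory" encodings in which TEX[1:0] carries the outer
 * cache policy and CB the inner policy. An inner write-back/write-allocate,
 * outer write-through L2 attribute could be built as:
 *
 *   uint32_t attrs = MMU_MEMORY_SET_L2_CACHEABLE_MEM |
 *                    MMU_MEMORY_SET_L2_INNER(MMU_MEMORY_WRITE_BACK_ALLOCATE) |
 *                    MMU_MEMORY_SET_L2_OUTER(MMU_MEMORY_WRITE_THROUGH_NO_ALLOCATE);
 */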

#define MMU_MEMORY_L1_SECTION_ADDR(x)       ((x) & ~((1U<<20)-1))
#define MMU_MEMORY_L1_PAGE_TABLE_ADDR(x)    ((x) & ~((1U<<10)-1))

#define MMU_MEMORY_L2_SMALL_PAGE_ADDR(x)    ((x) & ~((1U<<12)-1))
#define MMU_MEMORY_L2_LARGE_PAGE_ADDR(x)    ((x) & ~((1U<<16)-1))

#define MMU_MEMORY_TTBR_RGN(x)              (((x) & 0x3) << 3)
/* IRGN[1:0] is encoded as: IRGN[0] in TTBRx[6], and IRGN[1] in TTBRx[0] */
#define MMU_MEMORY_TTBR_IRGN(x)             ((((x) & 0x1) << 6) | \
                                            ((((x) >> 1) & 0x1) << 0))
#define MMU_MEMORY_TTBR_S                   (1 << 1)
#define MMU_MEMORY_TTBR_NOS                 (1 << 5)

/* Default configuration for the main kernel page table:
 *    - section mappings for memory
 *    - cached translation table walks
 */

/* Enable cached page table walks:
 * inner/outer (IRGN/RGN): write-back + write-allocate
 * (select inner shareable on SMP)
 */
#if WITH_SMP || WITH_SHAREABLE_CACHE
#define MMU_TTBRx_SHARABLE_FLAGS (MMU_MEMORY_TTBR_S | MMU_MEMORY_TTBR_NOS)
#else
#define MMU_TTBRx_SHARABLE_FLAGS (0)
#endif
#define MMU_TTBRx_FLAGS \
    (MMU_MEMORY_TTBR_RGN(MMU_MEMORY_WRITE_BACK_ALLOCATE) | \
     MMU_MEMORY_TTBR_IRGN(MMU_MEMORY_WRITE_BACK_ALLOCATE) | \
     MMU_TTBRx_SHARABLE_FLAGS)
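
/*
 * Example (illustrative sketch): these flags would typically be OR-ed into
 * the physical address of the 16KB-aligned L1 table before loading TTBR0.
 * arm_write_ttbr0() is assumed here to be the usual lk cp15 accessor from
 * arch/arm.h, and kernel_tt_phys is a hypothetical local:
 *
 *   arm_write_ttbr0(kernel_tt_phys | MMU_TTBRx_FLAGS);
 */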

/* Section mapping, TEX[2:0]=001, CB=11, AP[2:0]=001 (plus S=1 on SMP) */
#if WITH_SMP || WITH_SHAREABLE_CACHE
#define MMU_KERNEL_L1_PTE_FLAGS \
    (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \
     MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE | \
     MMU_MEMORY_L1_AP_P_RW_U_NA | \
     MMU_MEMORY_L1_SECTION_SHAREABLE)
#else
#define MMU_KERNEL_L1_PTE_FLAGS \
    (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \
     MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE | \
     MMU_MEMORY_L1_AP_P_RW_U_NA)
#endif

#define MMU_INITIAL_MAP_STRONGLY_ORDERED \
    (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \
     MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED | \
     MMU_MEMORY_L1_AP_P_RW_U_NA)

#define MMU_INITIAL_MAP_DEVICE \
    (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \
     MMU_MEMORY_L1_TYPE_DEVICE_SHARED | \
     MMU_MEMORY_L1_AP_P_RW_U_NA)
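
/*
 * Example (illustrative sketch): installing a 1MB kernel section mapping
 * of physical address pa at virtual address va (tt, va and pa are
 * hypothetical locals):
 *
 *   tt[va >> 20] = MMU_MEMORY_L1_SECTION_ADDR(pa) | MMU_KERNEL_L1_PTE_FLAGS;
 *   arm_invalidate_tlb_mva(va);    // TLB helpers are declared below
 */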

#endif // armv6 || armv7

#ifndef ASSEMBLY

#include <sys/types.h>
#include <assert.h>
#include <compiler.h>
#include <arch/arm.h>

__BEGIN_CDECLS

void arm_mmu_early_init(void);
void arm_mmu_init(void);
status_t arm_vtop(addr_t va, addr_t *pa);

/* TLB maintenance routines.
 * On SMP (or shareable-cache) builds the inner-shareable broadcast forms
 * of the maintenance operations are used so the invalidation reaches all
 * cores.
 */
static inline void arm_after_invalidate_tlb_barrier(void)
{
    /* flush the branch predictors as well, then synchronize */
#if WITH_SMP || WITH_SHAREABLE_CACHE
    arm_write_bpiallis(0);
#else
    arm_write_bpiall(0);
#endif
    DSB;
    ISB;
}

static inline void arm_invalidate_tlb_global_no_barrier(void)
{
#if WITH_SMP || WITH_SHAREABLE_CACHE
    arm_write_tlbiallis(0);
#else
    arm_write_tlbiall(0);
#endif
}

static inline void arm_invalidate_tlb_global(void)
{
    DSB;
    arm_invalidate_tlb_global_no_barrier();
    arm_after_invalidate_tlb_barrier();
}

static inline void arm_invalidate_tlb_mva_no_barrier(vaddr_t va)
{
    /* TLBIMVAA: invalidate by 4KB-aligned VA across all ASIDs */
#if WITH_SMP || WITH_SHAREABLE_CACHE
    arm_write_tlbimvaais(va & 0xfffff000);
#else
    arm_write_tlbimvaa(va & 0xfffff000);
#endif
}

static inline void arm_invalidate_tlb_mva(vaddr_t va)
{
    DSB;
    arm_invalidate_tlb_mva_no_barrier(va);
    arm_after_invalidate_tlb_barrier();
}

static inline void arm_invalidate_tlb_asid_no_barrier(uint8_t asid)
{
#if WITH_SMP || WITH_SHAREABLE_CACHE
    arm_write_tlbiasidis(asid);
#else
    arm_write_tlbiasid(asid);
#endif
}

static inline void arm_invalidate_tlb_asid(uint8_t asid)
{
    DSB;
    arm_invalidate_tlb_asid_no_barrier(asid);
    arm_after_invalidate_tlb_barrier();
}

static inline void arm_invalidate_tlb_mva_asid_no_barrier(vaddr_t va, uint8_t asid)
{
    /* TLBIMVA takes VA[31:12] with the ASID in the low byte */
#if WITH_SMP || WITH_SHAREABLE_CACHE
    arm_write_tlbimvais((va & 0xfffff000) | asid);
#else
    arm_write_tlbimva((va & 0xfffff000) | asid);
#endif
}

static inline void arm_invalidate_tlb_mva_asid(vaddr_t va, uint8_t asid)
{
    DSB;
    arm_invalidate_tlb_mva_asid_no_barrier(va, asid);
    arm_after_invalidate_tlb_barrier();
}
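
/*
 * Example (illustrative sketch): after changing a page table entry the
 * stale translation must be invalidated; for a single user page tagged
 * with an ASID (l2_table, idx and new_pte are hypothetical):
 *
 *   l2_table[idx] = new_pte;
 *   arm_invalidate_tlb_mva_asid(va, asid);
 *
 * To batch several invalidations, issue one DSB, call the *_no_barrier
 * variants in a loop, then finish with arm_after_invalidate_tlb_barrier().
 */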

__END_CDECLS

#endif /* ASSEMBLY */

#endif /* __ARCH_ARM_MMU_H */