1 /*
2  * Copyright (c) 2014 Google Inc. All rights reserved
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #ifndef __ARCH_ARM64_MMU_H
25 #define __ARCH_ARM64_MMU_H
26 
27 #include <arch/defines.h>
28 
/*
 * IFTE(c, t, e): arithmetic if-then-else, yielding t when c is nonzero and
 * e otherwise. Written with multiplies/or instead of ?: so the result stays
 * usable inside #if expressions. Note both t and e are always evaluated, so
 * arguments must be side-effect free.
 */
#define IFTE(c,t,e) (!!(c) * (t) | !(c) * (e))

/*
 * NBITS(n): number of significant bits in n, i.e. the index of the highest
 * set bit plus one (NBITS(0) == 0, NBITS(1) == 1, NBITS(0xff) == 8).
 * Implemented as a binary search that doubles the examined width at each
 * stage, up to 64 bits; fully evaluable by the preprocessor.
 */
#define NBITS01(n)      IFTE(n, 1, 0)
#define NBITS02(n)      IFTE((n) >>  1,  1 + NBITS01((n) >>  1), NBITS01(n))
#define NBITS04(n)      IFTE((n) >>  2,  2 + NBITS02((n) >>  2), NBITS02(n))
#define NBITS08(n)      IFTE((n) >>  4,  4 + NBITS04((n) >>  4), NBITS04(n))
#define NBITS16(n)      IFTE((n) >>  8,  8 + NBITS08((n) >>  8), NBITS08(n))
#define NBITS32(n)      IFTE((n) >> 16, 16 + NBITS16((n) >> 16), NBITS16(n))
#define NBITS(n)        IFTE((n) >> 32, 32 + NBITS32((n) >> 32), NBITS32(n))
40 
#ifndef MMU_KERNEL_SIZE_SHIFT
/* Derive the kernel address space size (in bits) from the distance between
 * the top of the 64-bit address space and the configured kernel bases. */
#define KERNEL_ASPACE_BITS (NBITS(0xffffffffffffffff-KERNEL_ASPACE_BASE))
#define KERNEL_BASE_BITS (NBITS(0xffffffffffffffff-KERNEL_BASE))
#if KERNEL_BASE_BITS > KERNEL_ASPACE_BITS
#define KERNEL_ASPACE_BITS KERNEL_BASE_BITS /* KERNEL_BASE should not be below KERNEL_ASPACE_BASE */
#endif

/* Clamp to at least 25 bits. NOTE(review): 25 corresponds to the largest
 * TxSZ value (64 - 39) the TCR can encode -- confirm against the ARM ARM
 * for the targeted translation granule. */
#if KERNEL_ASPACE_BITS < 25
#define MMU_KERNEL_SIZE_SHIFT (25)
#else
#define MMU_KERNEL_SIZE_SHIFT (KERNEL_ASPACE_BITS)
#endif
#endif

/* User address space size in bits, derived from the platform's
 * USER_ASPACE_SIZE unless overridden. */
#ifndef MMU_USER_SIZE_SHIFT
#define MMU_USER_SIZE_SHIFT (NBITS(USER_ASPACE_SIZE))
#endif

/* Size in bits of the identity-mapped address space (see the block-mapping
 * granule selection below). */
#ifndef MMU_IDENT_SIZE_SHIFT
#define MMU_IDENT_SIZE_SHIFT 42 /* Max size supported by block mappings */
#endif
62 
#define MMU_KERNEL_PAGE_SIZE_SHIFT      (PAGE_SIZE_SHIFT)
#define MMU_USER_PAGE_SIZE_SHIFT        (USER_PAGE_SIZE_SHIFT)

/* Pick a translation granule for the identity map such that the whole range
 * can be covered by block mappings at a single table level. */
#if MMU_IDENT_SIZE_SHIFT < 25
#error MMU_IDENT_SIZE_SHIFT too small
#elif MMU_IDENT_SIZE_SHIFT <= 29 /* Use 2MB block mappings (4K page size) */
#define MMU_IDENT_PAGE_SIZE_SHIFT       (SHIFT_4K)
#elif MMU_IDENT_SIZE_SHIFT <= 30 /* Use 512MB block mappings (64K page size) */
#define MMU_IDENT_PAGE_SIZE_SHIFT       (SHIFT_64K)
#elif MMU_IDENT_SIZE_SHIFT <= 39 /* Use 1GB block mappings (4K page size) */
#define MMU_IDENT_PAGE_SIZE_SHIFT       (SHIFT_4K)
#elif MMU_IDENT_SIZE_SHIFT <= 42 /* Use 512MB block mappings (64K page size) */
#define MMU_IDENT_PAGE_SIZE_SHIFT       (SHIFT_64K)
#else
#error MMU_IDENT_SIZE_SHIFT too large
#endif
79 
80 /*
81  * TCR TGx values
82  *
83  * Page size:   4K      16K     64K
84  * TG0:         0       2       1
85  * TG1:         2       1       3
86  */
87 
88 #define MMU_TG0(page_size_shift) ((((page_size_shift == 14) & 1) << 1) | \
89                                   ((page_size_shift == 16) & 1))
90 
91 #define MMU_TG1(page_size_shift) ((((page_size_shift == 12) & 1) << 1) | \
92                                   ((page_size_shift == 14) & 1) | \
93                                   ((page_size_shift == 16) & 1) | \
94                                   (((page_size_shift == 16) & 1) << 1))
95 
96 #define MMU_LX_X(page_shift, level) ((4 - (level)) * ((page_shift) - 3) + 3)
97 
/* For each address space, select the top (starting) translation level: the
 * highest level whose span is actually exceeded by the configured size.
 * *_TOP_SHIFT is the lowest VA bit indexed by the top-level table, and
 * *_PAGE_TABLE_ENTRIES_TOP is that table's entry count. */
#if MMU_USER_SIZE_SHIFT > MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 0)
#define MMU_USER_TOP_SHIFT MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 0)
#elif MMU_USER_SIZE_SHIFT > MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 1)
#define MMU_USER_TOP_SHIFT MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 1)
#elif MMU_USER_SIZE_SHIFT > MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 2)
#define MMU_USER_TOP_SHIFT MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 2)
#elif MMU_USER_SIZE_SHIFT > MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 3)
#define MMU_USER_TOP_SHIFT MMU_LX_X(MMU_USER_PAGE_SIZE_SHIFT, 3)
#else
#error User address space size must be larger than page size
#endif
#define MMU_USER_PAGE_TABLE_ENTRIES_TOP (0x1 << (MMU_USER_SIZE_SHIFT - MMU_USER_TOP_SHIFT))

#if MMU_KERNEL_SIZE_SHIFT > MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 0)
#define MMU_KERNEL_TOP_SHIFT MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 0)
#elif MMU_KERNEL_SIZE_SHIFT > MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 1)
#define MMU_KERNEL_TOP_SHIFT MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 1)
#elif MMU_KERNEL_SIZE_SHIFT > MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 2)
#define MMU_KERNEL_TOP_SHIFT MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 2)
#elif MMU_KERNEL_SIZE_SHIFT > MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 3)
#define MMU_KERNEL_TOP_SHIFT MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 3)
#else
#error Kernel address space size must be larger than page size
#endif
#define MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP (0x1 << (MMU_KERNEL_SIZE_SHIFT - MMU_KERNEL_TOP_SHIFT))

#if MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 0)
#define MMU_IDENT_TOP_SHIFT MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 0)
#elif MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 1)
#define MMU_IDENT_TOP_SHIFT MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 1)
#elif MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 2)
#define MMU_IDENT_TOP_SHIFT MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 2)
#elif MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 3)
#define MMU_IDENT_TOP_SHIFT MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 3)
#else
#error Ident address space size must be larger than page size
#endif
#define MMU_PAGE_TABLE_ENTRIES_IDENT_SHIFT (MMU_IDENT_SIZE_SHIFT - MMU_IDENT_TOP_SHIFT)
#define MMU_PAGE_TABLE_ENTRIES_IDENT (0x1 << MMU_PAGE_TABLE_ENTRIES_IDENT_SHIFT)
137 
/* Largest mapping size (as a shift) for which block descriptors may be used;
 * 30 corresponds to 1GB (the level-1 block size with a 4K granule). */
#define MMU_PTE_DESCRIPTOR_BLOCK_MAX_SHIFT      (30)

/* BM(base, count, val): build a bit-field value -- mask val to count bits
 * and shift it up to base. The C variant uses 1UL so the intermediate mask
 * is 64-bit (count can be up to 36 here); the assembly variant uses a plain
 * 0x1 literal. NOTE(review): assumes the assembler evaluates expressions at
 * 64-bit width -- true for GNU as, confirm for other toolchains. */
#ifndef ASSEMBLY
#define BM(base, count, val) (((val) & ((1UL << (count)) - 1)) << (base))
#else
#define BM(base, count, val) (((val) & ((0x1 << (count)) - 1)) << (base))
#endif
145 
/* Shareability encodings for the TCR SH0/SH1 fields */
#define MMU_SH_NON_SHAREABLE                    (0)
#define MMU_SH_OUTER_SHAREABLE                  (2)
#define MMU_SH_INNER_SHAREABLE                  (3)

/* Cacheability encodings for the TCR IRGN0/IRGN1/ORGN0/ORGN1 fields */
#define MMU_RGN_NON_CACHEABLE                   (0)
#define MMU_RGN_WRITE_BACK_ALLOCATE             (1)
#define MMU_RGN_WRITE_THROUGH_NO_ALLOCATE       (2)
#define MMU_RGN_WRITE_BACK_NO_ALLOCATE          (3)

/* TCR_EL1 bit-field constructors. Fields ending in 1 configure TTBR1 (upper
 * VA range) walks, fields ending in 0 configure TTBR0 (lower VA range). */
#define MMU_TCR_TBID1                           BM(52, 1, 1)
#define MMU_TCR_TBID0                           BM(51, 1, 1)
#define MMU_TCR_TBI1                            BM(38, 1, 1)
#define MMU_TCR_TBI0                            BM(37, 1, 1)
#define MMU_TCR_AS                              BM(36, 1, 1)
#define MMU_TCR_IPS(size)                       BM(32, 3, (size))
#define MMU_TCR_TG1(granule_size)               BM(30, 2, (granule_size))
#define MMU_TCR_SH1(shareability_flags)         BM(28, 2, (shareability_flags))
#define MMU_TCR_ORGN1(cache_flags)              BM(26, 2, (cache_flags))
#define MMU_TCR_IRGN1(cache_flags)              BM(24, 2, (cache_flags))
#define MMU_TCR_EPD1                            BM(23, 1, 1)
#define MMU_TCR_A1                              BM(22, 1, 1)
#define MMU_TCR_T1SZ(size)                      BM(16, 6, (size))
#define MMU_TCR_TG0(granule_size)               BM(14, 2, (granule_size))
#define MMU_TCR_SH0(shareability_flags)         BM(12, 2, (shareability_flags))
#define MMU_TCR_ORGN0(cache_flags)              BM(10, 2, (cache_flags))
#define MMU_TCR_IRGN0(cache_flags)              BM( 8, 2, (cache_flags))
#define MMU_TCR_EPD0                            BM( 7, 1, 1)
#define MMU_TCR_T0SZ(size)                      BM( 0, 6, (size))
174 
/* MAIR_ELx attribute byte for the given attribute index (0-7): each index
 * occupies one byte of the register. `index` is parenthesized so expressions
 * such as MMU_MAIR_ATTR(i + 1, a) multiply correctly (the old expansion
 * `index * 8` would have become `i + 1 * 8`). */
#define MMU_MAIR_ATTR(index, attr)              BM((index) * 8, 8, (attr))
176 
177 
/* L0/L1/L2/L3 descriptor types (low two bits of every descriptor) */
#define MMU_PTE_DESCRIPTOR_INVALID              BM(0, 2, 0)
#define MMU_PTE_DESCRIPTOR_MASK                 BM(0, 2, 3)

/* L0/L1/L2 descriptor types */
#define MMU_PTE_L012_DESCRIPTOR_BLOCK           BM(0, 2, 1)
#define MMU_PTE_L012_DESCRIPTOR_TABLE           BM(0, 2, 3)

/* L3 descriptor types */
#define MMU_PTE_L3_DESCRIPTOR_PAGE              BM(0, 2, 3)

/* Output address mask: bits 47:12 (48-bit physical addresses) */
#define MMU_PTE_OUTPUT_ADDR_MASK                BM(12, 36, 0xfffffffff)

/* Table attrs (upper bits of table descriptors; apply hierarchically) */
#define MMU_PTE_ATTR_NS_TABLE                   BM(63, 1, 1)
#define MMU_PTE_ATTR_AP_TABLE_NO_WRITE          BM(62, 1, 1)
#define MMU_PTE_ATTR_AP_TABLE_NO_EL0            BM(61, 1, 1)
#define MMU_PTE_ATTR_UXN_TABLE                  BM(60, 1, 1)
#define MMU_PTE_ATTR_PXN_TABLE                  BM(59, 1, 1)

/* Block/Page attrs */
#define MMU_PTE_ATTR_RES_SOFTWARE               BM(55, 4, 0xf)
#define MMU_PTE_ATTR_UXN                        BM(54, 1, 1)
#define MMU_PTE_ATTR_PXN                        BM(53, 1, 1)
#define MMU_PTE_ATTR_CONTIGUOUS                 BM(52, 1, 1)
#define MMU_PTE_ATTR_GP                         BM(50, 1, 1)

#define MMU_PTE_ATTR_NON_GLOBAL                 BM(11, 1, 1)
#define MMU_PTE_ATTR_AF                         BM(10, 1, 1)

/* Shareability field (bits 9:8) */
#define MMU_PTE_ATTR_SH_NON_SHAREABLE           BM(8, 2, 0)
#define MMU_PTE_ATTR_SH_OUTER_SHAREABLE         BM(8, 2, 2)
#define MMU_PTE_ATTR_SH_INNER_SHAREABLE         BM(8, 2, 3)

/* Access permissions (bits 7:6): P = privileged (EL1), U = user (EL0) */
#define MMU_PTE_ATTR_AP_P_RW_U_NA               BM(6, 2, 0)
#define MMU_PTE_ATTR_AP_P_RW_U_RW               BM(6, 2, 1)
#define MMU_PTE_ATTR_AP_P_RO_U_NA               BM(6, 2, 2)
#define MMU_PTE_ATTR_AP_P_RO_U_RO               BM(6, 2, 3)
#define MMU_PTE_ATTR_AP_MASK                    BM(6, 2, 3)

#define MMU_PTE_ATTR_NON_SECURE                 BM(5, 1, 1)

/* Index (bits 4:2) into the MAIR attribute bytes defined below */
#define MMU_PTE_ATTR_ATTR_INDEX(attrindex)      BM(2, 3, attrindex)
#define MMU_PTE_ATTR_ATTR_INDEX_MASK            MMU_PTE_ATTR_ATTR_INDEX(7)
223 
/* Default configuration for main kernel page table:
 *    - do cached translation walks
 *
 * Each MMU_MAIR_ATTRn byte below pairs with the MMU_PTE_ATTR_* memory-type
 * macro that selects attribute index n in a page table entry.
 */

/* Device-nGnRnE memory */
#define MMU_MAIR_ATTR0                  MMU_MAIR_ATTR(0, 0x00)
#define MMU_PTE_ATTR_STRONGLY_ORDERED   MMU_PTE_ATTR_ATTR_INDEX(0)

/* Device-nGnRE memory */
#define MMU_MAIR_ATTR1                  MMU_MAIR_ATTR(1, 0x04)
#define MMU_PTE_ATTR_DEVICE             MMU_PTE_ATTR_ATTR_INDEX(1)

/* Normal Memory, Outer Write-back non-transient Read/Write allocate,
 * Inner Write-back non-transient Read/Write allocate
 */
#define MMU_MAIR_ATTR2                  MMU_MAIR_ATTR(2, 0xff)
#define MMU_PTE_ATTR_NORMAL_MEMORY      MMU_PTE_ATTR_ATTR_INDEX(2)

/* As Normal Memory above, but tagged */
#define MMU_MAIR_ATTR3                  MMU_MAIR_ATTR(3, 0xf0)
#define MMU_PTE_ATTR_NORMAL_MEMORY_TAGGED MMU_PTE_ATTR_ATTR_INDEX(3)

/* Attribute indices 4-7 are unused */
#define MMU_MAIR_ATTR4                  (0)
#define MMU_MAIR_ATTR5                  (0)
#define MMU_MAIR_ATTR6                  (0)
#define MMU_MAIR_ATTR7                  (0)

#define MMU_MAIR_VAL                    (MMU_MAIR_ATTR0 | MMU_MAIR_ATTR1 | \
                                         MMU_MAIR_ATTR2 | MMU_MAIR_ATTR3 | \
                                         MMU_MAIR_ATTR4 | MMU_MAIR_ATTR5 | \
                                         MMU_MAIR_ATTR6 | MMU_MAIR_ATTR7 )
255 
/* IPS encoding 2 selects a 40-bit intermediate physical address size */
#define MMU_TCR_IPS_DEFAULT MMU_TCR_IPS(2) /* TODO: read at runtime, or configure per platform */

/* Enable cached page table walks:
 * inner/outer (IRGN/ORGN): write-back + write-allocate
 */
/* TTBR1 (kernel half) walk configuration; T1SZ = 64 - region size in bits */
#define MMU_TCR_FLAGS1 (MMU_TCR_TG1(MMU_TG1(MMU_KERNEL_PAGE_SIZE_SHIFT)) | \
                        MMU_TCR_SH1(MMU_SH_INNER_SHAREABLE) | \
                        MMU_TCR_ORGN1(MMU_RGN_WRITE_BACK_ALLOCATE) | \
                        MMU_TCR_IRGN1(MMU_RGN_WRITE_BACK_ALLOCATE) | \
                        MMU_TCR_T1SZ(64 - MMU_KERNEL_SIZE_SHIFT))
/* TTBR0 (user half) walk configuration; TBI0 ignores the VA top byte */
#define MMU_TCR_FLAGS0 (MMU_TCR_TG0(MMU_TG0(MMU_USER_PAGE_SIZE_SHIFT)) | \
                        MMU_TCR_SH0(MMU_SH_INNER_SHAREABLE) | \
                        MMU_TCR_ORGN0(MMU_RGN_WRITE_BACK_ALLOCATE) | \
                        MMU_TCR_IRGN0(MMU_RGN_WRITE_BACK_ALLOCATE) | \
                        MMU_TCR_T0SZ(64 - MMU_USER_SIZE_SHIFT) | \
                        MMU_TCR_TBI0)
/* TTBR0 configuration when it points at the identity map instead */
#define MMU_TCR_FLAGS0_IDENT \
                       (MMU_TCR_TG0(MMU_TG0(MMU_IDENT_PAGE_SIZE_SHIFT)) | \
                        MMU_TCR_SH0(MMU_SH_INNER_SHAREABLE) | \
                        MMU_TCR_ORGN0(MMU_RGN_WRITE_BACK_ALLOCATE) | \
                        MMU_TCR_IRGN0(MMU_RGN_WRITE_BACK_ALLOCATE) | \
                        MMU_TCR_T0SZ(64 - MMU_IDENT_SIZE_SHIFT) | \
                        MMU_TCR_TBI0)
/* Complete TCR values; FLAGS_KERNEL adds EPD0 to disable TTBR0 walks when
 * no user address space is active */
#define MMU_TCR_FLAGS_IDENT (MMU_TCR_IPS_DEFAULT | MMU_TCR_FLAGS1 | MMU_TCR_FLAGS0_IDENT)
#define MMU_TCR_FLAGS_KERNEL (MMU_TCR_IPS_DEFAULT | MMU_TCR_FLAGS1 | MMU_TCR_FLAGS0 | MMU_TCR_EPD0)
#define MMU_TCR_FLAGS_USER (MMU_TCR_IPS_DEFAULT | MMU_TCR_FLAGS1 | MMU_TCR_FLAGS0)


/* The identity map is a single flat top-level table: use block descriptors
 * when its top level is L2 or above, page descriptors when it is L3 */
#if MMU_IDENT_SIZE_SHIFT > MMU_LX_X(MMU_IDENT_PAGE_SIZE_SHIFT, 2)
#define MMU_PTE_IDENT_DESCRIPTOR MMU_PTE_L012_DESCRIPTOR_BLOCK
#else
#define MMU_PTE_IDENT_DESCRIPTOR MMU_PTE_L3_DESCRIPTOR_PAGE
#endif
/* Identity-map entry: accessed, inner-shareable, normal memory,
 * privileged read/write only */
#define MMU_PTE_IDENT_FLAGS \
    (MMU_PTE_IDENT_DESCRIPTOR | \
     MMU_PTE_ATTR_AF | \
     MMU_PTE_ATTR_SH_INNER_SHAREABLE | \
     MMU_PTE_ATTR_NORMAL_MEMORY | \
     MMU_PTE_ATTR_AP_P_RW_U_NA)

/* As above, but using the MTE-tagged normal-memory attribute index */
#define MMU_PTE_IDENT_FLAGS_TAGGED \
    (MMU_PTE_IDENT_DESCRIPTOR | \
     MMU_PTE_ATTR_AF | \
     MMU_PTE_ATTR_SH_INNER_SHAREABLE | \
     MMU_PTE_ATTR_NORMAL_MEMORY_TAGGED | \
     MMU_PTE_ATTR_AP_P_RW_U_NA)
302 
303 #ifndef ASSEMBLY
304 
305 #include <sys/types.h>
306 #include <assert.h>
307 #include <compiler.h>
308 #include <arch/arm64.h>
309 
/* A translation table entry (any level) */
typedef uint64_t pte_t;

__BEGIN_CDECLS

/* Issue a TLB invalidate operation that takes no operand (e.g. vmalle1),
 * followed by an ISB. NOTE(review): there is no DSB between the TLBI and
 * the ISB; callers presumably order completion with an explicit dsb --
 * confirm at call sites. */
#define ARM64_TLBI_NOADDR(op) \
({ \
    __asm__ volatile("tlbi " #op::); \
    ISB; \
})

/* Issue a TLB invalidate operation that takes a register operand (address
 * and/or ASID, e.g. vae1), followed by an ISB. Same ordering caveat as
 * ARM64_TLBI_NOADDR. */
#define ARM64_TLBI(op, val) \
({ \
    __asm__ volatile("tlbi " #op ", %0" :: "r" (val)); \
    ISB; \
})

/* ASID value denoting global (all-address-space) mappings */
#define MMU_ARM64_GLOBAL_ASID (~0U)
/* Map [vaddr, vaddr + size) to [paddr, paddr + size) in top_page_table.
 * vaddr_base/top_size_shift/top_index_shift/page_size_shift describe the
 * table's geometry; attrs are MMU_PTE_ATTR_* bits; replace allows
 * overwriting existing entries. NOTE(review): returns 0 on success and a
 * negative error otherwise, presumably -- confirm in the implementation. */
int arm64_mmu_map(vaddr_t vaddr, paddr_t paddr, size_t size, pte_t attrs,
                  vaddr_t vaddr_base, uint top_size_shift,
                  uint top_index_shift, uint page_size_shift,
                  pte_t *top_page_table, uint asid, bool replace);
/* Remove mappings for [vaddr, vaddr + size) from top_page_table; parameters
 * mirror arm64_mmu_map. */
int arm64_mmu_unmap(vaddr_t vaddr, size_t size,
                    vaddr_t vaddr_base, uint top_size_shift,
                    uint top_index_shift, uint page_size_shift,
                    pte_t *top_page_table, uint asid);

__END_CDECLS
335 
336 __END_CDECLS
337 #endif /* ASSEMBLY */
338 
339 #endif
340