/* SPDX-License-Identifier: BSD-3-Clause */

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <symbols.h>

#include <console/console.h>
#include <arch/mmu.h>
#include <arch/lib_helpers.h>
#include <arch/cache.h>

/* This just caches the next free table slot (okay to do since they fill up
 * from bottom to top and can never be freed again). It resets to its initial
 * value on stage transition, so we still need to check it for UNUSED_DESC. */
static uint64_t *next_free_table = (void *)_ttb;

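/* Note: _ttb and _ettb are linker-provided symbols (declared via
 * <symbols.h>) bracketing the page table region; each GRANULE_SIZE-d slot
 * in that region can hold one translation table. */
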
static void print_tag(int level, uint64_t tag)
{
	printk(level, tag & MA_MEM_NC ? "non-cacheable | " :
				        "    cacheable | ");
	printk(level, tag & MA_RO ? "read-only | " :
				    "read-write | ");
	printk(level, tag & MA_NS ? "non-secure | " :
				    "    secure | ");
	printk(level, tag & MA_MEM ? "normal\n" :
				     "device\n");
}

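/* Combined with the prefix printed by mmu_config_range(), this yields log
 * lines like (illustrative):
 *   Mapping address range [0x0:0x80000000) as    cacheable | read-write |    secure | normal
 */
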
/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in memrange
 * region
 */
static uint64_t get_block_attr(unsigned long tag)
{
	uint64_t attr;

	attr = (tag & MA_NS) ? BLOCK_NS : 0;
	attr |= (tag & MA_RO) ? BLOCK_AP_RO : BLOCK_AP_RW;
	attr |= BLOCK_ACCESS;

	if (tag & MA_MEM) {
		attr |= BLOCK_SH_INNER_SHAREABLE;
		if (tag & MA_MEM_NC)
			attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
		else
			attr |= BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT;
	} else {
		attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
		attr |= BLOCK_XN;
	}

	return attr;
}

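/* For example (illustrative), a cacheable, read-write, secure, normal tag
 * (tag == MA_MEM) composes to:
 *   BLOCK_AP_RW | BLOCK_ACCESS | BLOCK_SH_INNER_SHAREABLE |
 *   (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT)
 * while any device mapping additionally gets BLOCK_XN, so device memory can
 * never be executed. */
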
/* Func : setup_new_table
 * Desc : Get next free table from TTB and set it up to match old parent entry.
 */
static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
	while (next_free_table[0] != UNUSED_DESC) {
		next_free_table += GRANULE_SIZE/sizeof(*next_free_table);
		if (_ettb - (u8 *)next_free_table <= 0)
			die("Ran out of page table space!");
	}

	void *frame_base = (void *)(desc & XLAT_ADDR_MASK);
	printk(BIOS_DEBUG, "Backing address range [%p:%p) with new page"
	       " table @%p\n", frame_base, frame_base +
	       (xlat_size << BITS_RESOLVED_PER_LVL), next_free_table);

	if (!desc) {
		memset(next_free_table, 0, GRANULE_SIZE);
	} else {
		/* Can reuse old parent entry, but may need to adjust type. */
		if (xlat_size == L3_XLAT_SIZE)
			desc |= PAGE_DESC;

		int i = 0;
		for (; i < GRANULE_SIZE/sizeof(*next_free_table); i++) {
			next_free_table[i] = desc;
			desc += xlat_size;
		}
	}

	return next_free_table;
}

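/* The replication loop above is what makes splitting a mapping cheap: a
 * parent entry covering N bytes is broken into GRANULE_SIZE/8 entries (512
 * with a 4KiB granule) that each cover N/512 bytes, so copying the parent
 * descriptor and stepping it by xlat_size reproduces the identical mapping
 * at the finer level before any part of it is overwritten. */
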
/* Func : get_next_level_table
 * Desc : Check if the table entry is a valid descriptor. If not, initialize a
 * new table, update the entry and return the table address. If it is valid,
 * just return the table address.
 */
static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
{
	uint64_t desc = *ptr;

	if ((desc & DESC_MASK) != TABLE_DESC) {
		uint64_t *new_table = setup_new_table(desc, xlat_size);
		desc = ((uint64_t)new_table) | TABLE_DESC;
		*ptr = desc;
	}
	return (uint64_t *)(desc & XLAT_ADDR_MASK);
}

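/* A table descriptor keeps its type bits in the low bits of the entry and
 * the next-level table's address in the XLAT_ADDR_MASK bits, which is why
 * masking alone recovers the pointer. */
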
/* Func : init_xlat_table
 * Desc : Given a base address and size, it identifies the indices within
 * different level XLAT tables which map the given base addr. Similar to a
 * table walk, except that all invalid entries encountered during the walk
 * are updated accordingly. On success, it returns the size of the block/page
 * addressed by the final table.
 */
static uint64_t init_xlat_table(uint64_t base_addr,
				uint64_t size,
				uint64_t tag)
{
	uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
	uint64_t *table = (uint64_t *)_ttb;
	uint64_t desc;
	uint64_t attr = get_block_attr(tag);

	/* L0 entry stores a table descriptor (doesn't support blocks) */
	table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);

	/* L1 table lookup */
	if ((size >= L1_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L1 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l1_index] = desc;
		/* L2 lookup is not required */
		return L1_XLAT_SIZE;
	}

	/* L1 entry stores a table descriptor */
	table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);

	/* L2 table lookup */
	if ((size >= L2_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L2 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l2_index] = desc;
		/* L3 lookup is not required */
		return L2_XLAT_SIZE;
	}

	/* L2 entry stores a table descriptor */
	table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);

	/* L3 table lookup */
	desc = base_addr | PAGE_DESC | attr;
	table[l3_index] = desc;
	return L3_XLAT_SIZE;
}

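/* With the 4KiB granule configured below (TCR_TG0_4KB), an L1/L2/L3 entry
 * covers 1GiB/2MiB/4KiB respectively. So, as an illustrative example,
 * mapping 2MiB at a 2MiB-aligned address takes the L2 block path above and
 * returns L2_XLAT_SIZE in a single call, while an unaligned or smaller
 * request falls through to individual L3 pages. */
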
/* Func : sanity_check
 * Desc : Check address/size alignment of a table or page.
 */
static void sanity_check(uint64_t addr, uint64_t size)
{
	assert(!(addr & GRANULE_SIZE_MASK) &&
	       !(size & GRANULE_SIZE_MASK) &&
	       (addr + size < (1UL << BITS_PER_VA)) &&
	       size >= GRANULE_SIZE);
}

/* Func : get_pte
 * Desc : Returns the page table entry governing a specific address. */
static uint64_t get_pte(void *addr)
{
	int shift = L0_ADDR_SHIFT;
	uint64_t *pte = (uint64_t *)_ttb;

	while (1) {
		int index = ((uintptr_t)addr >> shift) &
			    ((1UL << BITS_RESOLVED_PER_LVL) - 1);

		if ((pte[index] & DESC_MASK) != TABLE_DESC ||
		    shift <= GRANULE_SIZE_SHIFT)
			return pte[index];

		pte = (uint64_t *)(pte[index] & XLAT_ADDR_MASK);
		shift -= BITS_RESOLVED_PER_LVL;
	}
}

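/* This mirrors what the hardware walker does: resolve BITS_RESOLVED_PER_LVL
 * address bits per level starting at L0_ADDR_SHIFT, descending through table
 * descriptors until a block/page entry or the leaf level
 * (shift <= GRANULE_SIZE_SHIFT) is reached. */
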
/* Func : assert_correct_ttb_mapping
 * Desc : Asserts that mapping for addr matches the access type used by the
 * page table walk (i.e. addr is correctly mapped to be part of the TTB). */
static void assert_correct_ttb_mapping(void *addr)
{
	uint64_t pte = get_pte(addr);
	assert(((pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
	       == BLOCK_INDEX_MEM_NORMAL && !(pte & BLOCK_NS));
}

/* Func : mmu_config_range
 * Desc : Map a memory range with the attributes in tag. This repeatedly calls
 * init_xlat_table; each call maps the largest block/page that fits at the
 * current base address, and base_addr is advanced by the returned size until
 * the whole region is mapped.
 */
void mmu_config_range(void *start, size_t size, uint64_t tag)
{
	uint64_t base_addr = (uintptr_t)start;
	uint64_t temp_size = size;

	printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
	       start, start + size);
	print_tag(BIOS_INFO, tag);

	sanity_check(base_addr, temp_size);

	while (temp_size)
		temp_size -= init_xlat_table(base_addr + (size - temp_size),
					     temp_size, tag);

	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
	dsb();
	tlbiall();
	dsb();
	isb();
}

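/* Example (illustrative, assuming the usual coreboot tag macros from
 * <arch/mmu.h> and a hypothetical DRAM base): map 1GiB of DRAM as cacheable,
 * non-secure, read-write normal memory:
 *
 *	mmu_config_range((void *)0x80000000, 1ULL * GiB,
 *			 MA_MEM | MA_NS | MA_RW);
 */
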
/* Func : mmu_init
 * Desc : Initialize MMU registers and page table memory region. This must be
 * called exactly ONCE PER BOOT before trying to configure any mappings.
 */
void mmu_init(void)
{
	/* Initially mark all table slots unused (first PTE == UNUSED_DESC). */
	uint64_t *table = (uint64_t *)_ttb;
	for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE/sizeof(*table))
		table[0] = UNUSED_DESC;

	/* Initialize the root table (L0) to be completely unmapped. */
	uint64_t *root = setup_new_table(INVALID_DESC, L0_XLAT_SIZE);
	assert((u8 *)root == _ttb);

	/* Initialize TTBR */
	raw_write_ttbr0((uintptr_t)root);

	/* Initialize MAIR indices */
	raw_write_mair(MAIR_ATTRIBUTES);

	/* Initialize TCR flags */
	raw_write_tcr(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
		      TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
		      TCR_TBI_USED);
}

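/* Calling mmu_init() exactly once matters: it plants the UNUSED_DESC markers
 * that setup_new_table() relies on, so mmu_config_range() must never run
 * before it, and calling it a second time would clobber live tables (it
 * rewrites the first entry of every table slot). */
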
/* Func : mmu_save_context
 * Desc : Save MMU register context (MAIR and TCR). The TTBR base is not
 * saved; mmu_restore_context() always restores it to _ttb.
 */
void mmu_save_context(struct mmu_context *mmu_context)
{
	assert(mmu_context);

	/* Back-up MAIR_ATTRIBUTES */
	mmu_context->mair = raw_read_mair();

	/* Back-up TCR value */
	mmu_context->tcr = raw_read_tcr();
}

/* Func : mmu_restore_context
 * Desc : Restore MMU context from a previously saved backup.
 */
void mmu_restore_context(const struct mmu_context *mmu_context)
{
	assert(mmu_context);

	/* Restore TTBR */
	raw_write_ttbr0((uintptr_t)_ttb);

	/* Restore MAIR indices */
	raw_write_mair(mmu_context->mair);

	/* Restore TCR flags */
	raw_write_tcr(mmu_context->tcr);

	/* Invalidate TLBs since TTBR was updated. */
	tlb_invalidate_all();
}

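/* Func : mmu_enable
 * Desc : Verify that the page tables themselves are mapped with the
 * attributes the hardware table walker will use, then enable the MMU along
 * with the data and instruction caches.
 */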
void mmu_enable(void)
{
	assert_correct_ttb_mapping(_ttb);
	assert_correct_ttb_mapping((void *)((uintptr_t)_ettb - 1));

	uint32_t sctlr = raw_read_sctlr();
	raw_write_sctlr(sctlr | SCTLR_C | SCTLR_M | SCTLR_I);

	isb();
}