/*
 *
 * Copyright 2014 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#include <arch/mmu.h>
#include <arch/lib_helpers.h>
#include <arch/cache.h>

/* Maximum number of XLAT Tables available based on ttb buffer size */
static unsigned int max_tables;
/* Address of ttb buffer */
static uint64_t *xlat_addr;

static int free_idx;

/*
 * We refer to the section ".bss.ttb_buffer" in the linker script for ChromeOS's depthcharge
 * payload. Please DO NOT change the section name without discussing with us.
 * Please contact: [email protected] or [email protected]
 */
static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __aligned(GRANULE_SIZE)
	__section(".bss.ttb_buffer");

static const char * const tag_to_string[] = {
	[TYPE_NORMAL_MEM] = "normal",
	[TYPE_DEV_MEM] = "device",
	[TYPE_DMA_MEM] = "uncached",
};

/*
 * usedmem_ranges describes all the memory ranges that are actually used by the
 * payload, i.e. _start -> _end in the linker script plus the coreboot tables.
 * This is required for two purposes:
 * 1) During pre_sysinfo_scan_mmu_setup, these are the only ranges initialized
 * in the page table, since we do not yet know the entire memory map.
 * 2) During post_sysinfo_scan_mmu_setup, these ranges are used to check that
 * the DMA buffer is placed in a sane location and does not overlap any of the
 * used memory ranges.
 */
static struct mmu_ranges usedmem_ranges;
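
/*
 * Rough sketch of the call sequence implied by the comment above. The two
 * phases (pre_sysinfo_scan_mmu_setup / post_sysinfo_scan_mmu_setup) live in
 * the payload, not in this file, so the caller-side flow and the argument
 * names below are assumptions; only the functions named here are part of
 * this file's API:
 *
 *   // before the coreboot tables are parsed
 *   mmu_presysinfo_memory_used(payload_base, payload_size);
 *   mmu_presysinfo_enable();
 *
 *   // after the coreboot tables are parsed
 *   mmu_init_ranges_from_sysinfo(cb_ranges, ncb, &mmu_ranges);
 *   mmu_init(&mmu_ranges);
 *   mmu_enable();
 */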

static void __attribute__((noreturn)) mmu_error(void)
{
	halt();
}

/* Func : get_block_attr
 * Desc : Get block descriptor attributes based on the value of tag in memrange
 * region
 */
static uint64_t get_block_attr(unsigned long tag)
{
	uint64_t attr;

	/* We should be in EL2 (which is non-secure only) or EL1 (non-secure) */
	attr = BLOCK_NS;

	/* Assuming whole memory is read-write */
	attr |= BLOCK_AP_RW;

	attr |= BLOCK_ACCESS;

	switch (tag) {

	case TYPE_NORMAL_MEM:
		attr |= BLOCK_SH_INNER_SHAREABLE;
		attr |= (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT);
		break;
	case TYPE_DEV_MEM:
		attr |= BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT;
		attr |= BLOCK_XN;
		break;
	case TYPE_DMA_MEM:
		attr |= BLOCK_INDEX_MEM_NORMAL_NC << BLOCK_INDEX_SHIFT;
		break;
	}

	return attr;
}
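
/*
 * Net effect of the tags above (taken from the macro names; the exact
 * encodings come from the BLOCK_* / BLOCK_INDEX_* macros in <arch/mmu.h>):
 *   TYPE_NORMAL_MEM - normal memory, inner-shareable, MAIR index MEM_NORMAL
 *   TYPE_DEV_MEM    - device nGnRnE, execute-never
 *   TYPE_DMA_MEM    - normal memory, non-cacheable
 */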

/* Func : table_desc_valid
 * Desc : Check if a table entry contains valid desc
 */
static uint64_t table_desc_valid(uint64_t desc)
{
	return (desc & TABLE_DESC) == TABLE_DESC;
}

/* Func : setup_new_table
 * Desc : Get next free table from TTB and set it up to match old parent entry.
 */
static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
{
	uint64_t *new, *entry;

	assert(free_idx < max_tables);

	new = (uint64_t *)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
	free_idx++;

	if (!desc) {
		memset(new, 0, GRANULE_SIZE);
	} else {
		/* Can reuse old parent entry, but may need to adjust type. */
		if (xlat_size == L3_XLAT_SIZE)
			desc |= PAGE_DESC;

		for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
		     entry++, desc += xlat_size)
			*entry = desc;
	}

	return new;
}
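
/*
 * Example of the non-zero-desc path: when a 1GiB L1 block entry has to be
 * refined, the freshly allocated L2 table is pre-filled with
 * GRANULE_SIZE / sizeof(uint64_t) entries (512 for the 4KiB granule
 * configured in mmu_enable()), each stepped by L2_XLAT_SIZE, so the old
 * mapping stays intact until the caller overwrites the finer-grained entries
 * it actually cares about.
 */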

/* Func : get_table_from_desc
 * Desc : Get next level table address from table descriptor
 */
static uint64_t *get_table_from_desc(uint64_t desc)
{
	uint64_t *ptr = (uint64_t *)(desc & XLAT_TABLE_MASK);
	return ptr;
}

/* Func: get_next_level_table
 * Desc: Check if the table entry is a valid descriptor. If not, initialize new
 * table, update the entry and return the table addr. If valid, return the addr.
 */
static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
{
	uint64_t desc = *ptr;

	if (!table_desc_valid(desc)) {
		uint64_t *new_table = setup_new_table(desc, xlat_size);
		desc = ((uint64_t)new_table) | TABLE_DESC;
		*ptr = desc;
	}
	return get_table_from_desc(desc);
}

/* Func : init_xlat_table
 * Desc : Given a base address and size, it identifies the indices within
 * different level XLAT tables which map the given base addr. Similar to table
 * walk, except that all invalid entries during the walk are updated
 * accordingly. On success, it returns the size of the block/page addressed by
 * the final table.
 */
static uint64_t init_xlat_table(uint64_t base_addr,
				uint64_t size,
				uint64_t tag)
{
	uint64_t l0_index = (base_addr & L0_ADDR_MASK) >> L0_ADDR_SHIFT;
	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
	uint64_t *table = xlat_addr;
	uint64_t desc;
	uint64_t attr = get_block_attr(tag);

	/* L0 entry stores a table descriptor (doesn't support blocks) */
	table = get_next_level_table(&table[l0_index], L1_XLAT_SIZE);

	/* L1 table lookup */
	if ((size >= L1_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L1 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l1_index] = desc;
		/* L2 lookup is not required */
		return L1_XLAT_SIZE;
	}

	/* L1 entry stores a table descriptor */
	table = get_next_level_table(&table[l1_index], L2_XLAT_SIZE);

	/* L2 table lookup */
	if ((size >= L2_XLAT_SIZE) &&
	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
		/* If block address is aligned and size is greater than
		 * or equal to size addressed by each L2 entry, we can
		 * directly store a block desc */
		desc = base_addr | BLOCK_DESC | attr;
		table[l2_index] = desc;
		/* L3 lookup is not required */
		return L2_XLAT_SIZE;
	}

	/* L2 entry stores a table descriptor */
	table = get_next_level_table(&table[l2_index], L3_XLAT_SIZE);

	/* L3 table lookup */
	desc = base_addr | PAGE_DESC | attr;
	table[l3_index] = desc;
	return L3_XLAT_SIZE;
}
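
/*
 * With the 4KiB granule configured in mmu_enable() (TCR_TG0_4KB), the VA is
 * split in the standard ARMv8 way: L0 = VA[47:39], L1 = VA[38:30],
 * L2 = VA[29:21], L3 = VA[20:12], so an L1 block maps 1GiB, an L2 block 2MiB
 * and an L3 page 4KiB. For example, base_addr = 0x80200000 is 2MiB-aligned
 * but not 1GiB-aligned, so (given a sufficiently large size) the call above
 * installs a single 2MiB L2 block and returns L2_XLAT_SIZE.
 */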

/* Func : sanity_check
 * Desc : Check address/size alignment of a table or page.
 */
static void sanity_check(uint64_t addr, uint64_t size)
{
	assert(!(addr & GRANULE_SIZE_MASK) &&
	       !(size & GRANULE_SIZE_MASK) &&
	       (addr + size < (1UL << BITS_PER_VA)) &&
	       size >= GRANULE_SIZE);
}

/* Func : mmu_config_range
 * Desc : Map a memory region by repeatedly calling init_xlat_table. Each call
 * returns the size of the block/page it just mapped; base_addr is advanced by
 * that amount and further calls are made until the whole region is covered.
 */
void mmu_config_range(void *start, size_t size, uint64_t tag)
{
	uint64_t base_addr = (uintptr_t)start;
	uint64_t temp_size = size;

	assert(tag < ARRAY_SIZE(tag_to_string));
	printf("Libpayload: ARM64 MMU: Mapping address range [%p:%p) as %s\n",
	       start, start + size, tag_to_string[tag]);
	sanity_check(base_addr, temp_size);

	while (temp_size)
		temp_size -= init_xlat_table(base_addr + (size - temp_size),
					     temp_size, tag);

	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
	dsb();
	tlbiall_el2();
	dsb();
	isb();
}
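
/*
 * For example, the mmu_config_range(NULL, 0x100000000, TYPE_DEV_MEM) call in
 * mmu_init() below resolves into four loop iterations, each installing one
 * 1GiB L1 block descriptor (base 0 is 1GiB-aligned and the remaining size is
 * always >= L1_XLAT_SIZE).
 */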

/* Func : mmu_init
 * Desc : Initialize the MMU based on the mmu_ranges passed. ttb_buffer is used
 * as the base address for the xlat tables. TTB_DEFAULT_SIZE defines the
 * maximum number of tables that can be used.
 * Memory 0-4GiB is initially assumed to be device memory.
 */
uint64_t mmu_init(struct mmu_ranges *mmu_ranges)
{
	int i = 0;

	xlat_addr = (uint64_t *)&ttb_buffer;

	memset((void *)xlat_addr, 0, GRANULE_SIZE);
	max_tables = (TTB_DEFAULT_SIZE >> GRANULE_SIZE_SHIFT);
	free_idx = 1;

	printf("Libpayload ARM64: TTB_BUFFER: %p Max Tables: %d\n",
	       (void *)xlat_addr, max_tables);

	/*
	 * To keep things simple we start with mapping the entire base 4GB as
	 * device memory. This accommodates various architectures' default
	 * settings (for instance rk3399 mmio starts at 0xf8000000); it is
	 * fine tuned (e.g. mapping DRAM areas as write-back) later in the
	 * boot process.
	 */
	mmu_config_range(NULL, 0x100000000, TYPE_DEV_MEM);

	for (; i < mmu_ranges->used; i++)
		mmu_config_range((void *)mmu_ranges->entries[i].base,
				 mmu_ranges->entries[i].size,
				 mmu_ranges->entries[i].type);

	printf("Libpayload ARM64: MMU init done\n");
	return 0;
}

static uint32_t is_mmu_enabled(void)
{
	uint32_t sctlr;

	sctlr = raw_read_sctlr_el2();

	return (sctlr & SCTLR_M);
}

/*
 * Func: mmu_enable
 * Desc: Initialize MAIR, TCR, TTBR and enable MMU by setting appropriate bits
 * in SCTLR
 */
void mmu_enable(void)
{
	uint32_t sctlr;

	/* Initialize MAIR indices */
	raw_write_mair_el2(MAIR_ATTRIBUTES);

	/* Invalidate TLBs */
	tlbiall_el2();

	/* Initialize TCR flags */
	raw_write_tcr_el2(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
			      TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
			      TCR_TBI_USED);

	/* Initialize TTBR */
	raw_write_ttbr0_el2((uintptr_t)xlat_addr);

	/* Ensure system register writes are committed before enabling MMU */
	isb();

	/* Enable MMU */
	sctlr = raw_read_sctlr_el2();
	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
	raw_write_sctlr_el2(sctlr);

	isb();

	if (is_mmu_enabled())
		printf("ARM64: MMU enable done\n");
	else
		printf("ARM64: MMU enable failed\n");
}

/*
 * Func: mmu_add_memrange
 * Desc: Adds a new memory range
 */
static struct mmu_memrange *mmu_add_memrange(struct mmu_ranges *r,
					     uint64_t base, uint64_t size,
					     uint64_t type)
{
	struct mmu_memrange *curr = NULL;
	int i = r->used;

	if (i < ARRAY_SIZE(r->entries)) {
		curr = &r->entries[i];
		curr->base = base;
		curr->size = size;
		curr->type = type;

		r->used = i + 1;
	}

	return curr;
}

/* Structure to define properties of a new memrange request */
struct mmu_new_range_prop {
	/* Type of memrange */
	uint64_t type;
	/* Size of the range */
	uint64_t size;
	/*
	 * Upper limit on the address range (this address is exclusive for the
	 * range), if any restriction applies, else 0
	 */
	uint64_t lim_excl;
	/* Required alignment of the range base, if any restriction applies, else 0 */
	uint64_t align;
	/*
	 * Function to test whether a selected range is fine.
	 * NULL = any range is fine
	 * Return value: 1 = valid range, 0 = otherwise
	 */
	int (*is_valid_range)(uint64_t, uint64_t);
	/* From what type of source range should this range be extracted */
	uint64_t src_type;
};

/*
 * Func: mmu_is_range_free
 * Desc: We need to ensure that the new range being allocated does not overlap
 * any used memory range. Basically:
 * 1. Memory ranges used by the payload (usedmem_ranges)
 * 2. Any area that falls below the _end symbol in the linker script (the
 * kernel needs to be loaded in lower areas of memory, so the payload linker
 * script can place kernel memory below _start and _end; we want to make sure
 * we do not step into those areas either).
 * Returns: 1 on success, 0 on error
 * ASSUMPTION: All the memory used by the payload resides below the program
 * proper. If there is any memory used above the _end symbol, then it should be
 * marked as used memory in usedmem_ranges during the presysinfo_scan.
 */
static int mmu_is_range_free(uint64_t r_base,
			     uint64_t r_end)
{
	uint64_t payload_end = (uint64_t)&_end;
	uint64_t i;
	struct mmu_memrange *r = &usedmem_ranges.entries[0];

	/* Allocate memranges only above payload */
	if ((r_base <= payload_end) || (r_end <= payload_end))
		return 0;

	for (i = 0; i < usedmem_ranges.used; i++) {
		uint64_t start = r[i].base;
		uint64_t end = start + r[i].size;

		if ((start < r_end) && (end > r_base))
			return 0;
	}

	return 1;
}

/*
 * Func: mmu_get_new_range
 * Desc: Add a requested new memrange. We take as input the set of all
 * memranges and a structure defining the new memrange properties, i.e. its
 * type, size, the max addr it can grow up to, alignment restrictions, the
 * source type to take the range from, and finally a function pointer to check
 * whether the chosen range is valid.
 */
static struct mmu_memrange *mmu_get_new_range(struct mmu_ranges *mmu_ranges,
					      struct mmu_new_range_prop *new)
{
	int i = 0;
	struct mmu_memrange *r = &mmu_ranges->entries[0];

	if (new->size == 0) {
		printf("MMU Error: Invalid range size\n");
		return NULL;
	}

	for (; i < mmu_ranges->used; i++) {

		if ((r[i].type != new->src_type) ||
		    (r[i].size < new->size) ||
		    (new->lim_excl && (r[i].base >= new->lim_excl)))
			continue;

		uint64_t base_addr;
		uint64_t range_end_addr = r[i].base + r[i].size;
		uint64_t end_addr = range_end_addr;

		/* Make sure we do not go above max if it is non-zero */
		if (new->lim_excl && (end_addr >= new->lim_excl))
			end_addr = new->lim_excl;

		while (1) {
			/*
			 * In case of alignment requirement,
			 * if end_addr is aligned, then base_addr will be too.
			 */
			if (new->align)
				end_addr = ALIGN_DOWN(end_addr, new->align);

			base_addr = end_addr - new->size;

			if (base_addr < r[i].base)
				break;

			/*
			 * If the selected range is not used and valid for the
			 * user, move ahead with it
			 */
			if (mmu_is_range_free(base_addr, end_addr) &&
			    ((new->is_valid_range == NULL) ||
			     new->is_valid_range(base_addr, end_addr)))
				break;

			/* Drop to the next address. */
			end_addr -= 1;
		}

		if (base_addr < r[i].base)
			continue;

		if (end_addr != range_end_addr) {
			/* Add a new memrange for the leftover tail, since the
			 * allocation did not come from the very end of r[i]
			 * (e.g. because of lim_excl clamping or the ALIGN_DOWN
			 * on end_addr).
			 */
			r[i].size -= (range_end_addr - end_addr);
			if (mmu_add_memrange(mmu_ranges, end_addr,
					     range_end_addr - end_addr,
					     r[i].type) == NULL)
				mmu_error();
		}

		if (r[i].size == new->size) {
			r[i].type = new->type;
			return &r[i];
		}

		r[i].size -= new->size;

		r = mmu_add_memrange(mmu_ranges, base_addr, new->size,
				     new->type);

		if (r == NULL)
			mmu_error();

		return r;
	}

	/* Should never reach here if everything went fine */
	printf("ARM64 ERROR: No region allocated\n");
	return NULL;
}
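
/*
 * Example (the DMA case below): mmu_add_dma_range() asks for DMA_DEFAULT_SIZE
 * bytes of TYPE_NORMAL_MEM below CONFIG_LP_DMA_LIM_EXCL MiB. The loop above
 * takes the first normal-memory range that starts below that limit, carves
 * the new range out of its top (clamped under the limit and aligned down to
 * GRANULE_SIZE), and splits the source range so that any leftover tail keeps
 * its original type.
 */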

/*
 * Func: mmu_alloc_range
 * Desc: Call mmu_get_new_range to get a new, unused memrange and mark it as
 * used to avoid the same range being allocated for different purposes.
 */
static struct mmu_memrange *mmu_alloc_range(struct mmu_ranges *mmu_ranges,
					    struct mmu_new_range_prop *p)
{
	struct mmu_memrange *r = mmu_get_new_range(mmu_ranges, p);

	if (r == NULL)
		return NULL;

	/*
	 * Mark this memrange as used memory. Important since the function
	 * can be called multiple times and we do not want to reuse some
	 * range already allocated.
	 */
	if (mmu_add_memrange(&usedmem_ranges, r->base, r->size, r->type)
	    == NULL)
		mmu_error();

	return r;
}

/*
 * Func: mmu_add_dma_range
 * Desc: Add a memrange for DMA operations. This is special because we want to
 * initialize this memory as non-cacheable. We have a constraint that the DMA
 * buffer should be below 4GiB (32-bit only). So, we look up a TYPE_NORMAL_MEM
 * range from the lowest available addresses and align it to the translation
 * granule size (GRANULE_SIZE).
 */
static struct mmu_memrange *mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
{
	struct mmu_new_range_prop prop;

	prop.type = TYPE_DMA_MEM;
	/* DMA_DEFAULT_SIZE is a multiple of GRANULE_SIZE */
	assert((DMA_DEFAULT_SIZE % GRANULE_SIZE) == 0);
	prop.size = DMA_DEFAULT_SIZE;
	prop.lim_excl = (uint64_t)CONFIG_LP_DMA_LIM_EXCL * MiB;
	prop.align = GRANULE_SIZE;
	prop.is_valid_range = NULL;
	prop.src_type = TYPE_NORMAL_MEM;

	return mmu_alloc_range(mmu_ranges, &prop);
}

static struct mmu_memrange *_mmu_add_fb_range(
		uint32_t size,
		struct mmu_ranges *mmu_ranges)
{
	struct mmu_new_range_prop prop;

	prop.type = TYPE_DMA_MEM;

	prop.size = size;
	prop.lim_excl = MIN_64_BIT_ADDR;
	prop.align = MB_SIZE;
	prop.is_valid_range = NULL;
	prop.src_type = TYPE_NORMAL_MEM;

	return mmu_alloc_range(mmu_ranges, &prop);
}

/*
 * Func: mmu_extract_ranges
 * Desc: The assumption is that the coreboot tables list memranges in sorted
 * order, so whenever there is an opportunity to combine adjacent ranges we do
 * that as well. Memranges of both CB_MEM_RAM and CB_MEM_TABLE type are added
 * as TYPE_NORMAL_MEM.
 */
static void mmu_extract_ranges(struct memrange *cb_ranges,
			       uint64_t ncb,
			       struct mmu_ranges *mmu_ranges)
{
	int i = 0;
	struct mmu_memrange *prev_range = NULL;

	/* Extract memory ranges to be mapped */
	for (; i < ncb; i++) {
		switch (cb_ranges[i].type) {
		case CB_MEM_RAM:
		case CB_MEM_TABLE:
			if (prev_range && (prev_range->base + prev_range->size
					   == cb_ranges[i].base)) {
				prev_range->size += cb_ranges[i].size;
			} else {
				prev_range = mmu_add_memrange(mmu_ranges,
							      cb_ranges[i].base,
							      cb_ranges[i].size,
							      TYPE_NORMAL_MEM);
				if (prev_range == NULL)
					mmu_error();
			}
			break;
		default:
			break;
		}
	}
}
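
/*
 * For example, if coreboot reports CB_MEM_RAM at [0x80000000, 0x80200000) and
 * CB_MEM_TABLE at [0x80200000, 0x80300000), the two entries are merged into a
 * single TYPE_NORMAL_MEM memrange covering [0x80000000, 0x80300000). (The
 * addresses are made up for illustration.)
 */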

static void mmu_add_fb_range(struct mmu_ranges *mmu_ranges)
{
	struct mmu_memrange *fb_range;
	struct cb_framebuffer *framebuffer = &lib_sysinfo.framebuffer;
	uint32_t fb_size;

	/* Check whether a framebuffer is needed */
	fb_size = framebuffer->bytes_per_line * framebuffer->y_resolution;
	if (!fb_size)
		return;

	/* Make sure to allocate a size that is a multiple of GRANULE_SIZE */
	fb_size = ALIGN_UP(fb_size, GRANULE_SIZE);

	/* The framebuffer address has been set already, so just add it as DMA */
	if (framebuffer->physical_address) {
		if (mmu_add_memrange(mmu_ranges,
		    framebuffer->physical_address,
		    fb_size,
		    TYPE_DMA_MEM) == NULL)
			mmu_error();
		return;
	}

	/* Allocate the framebuffer */
	fb_range = _mmu_add_fb_range(fb_size, mmu_ranges);
	if (fb_range == NULL)
		mmu_error();

	framebuffer->physical_address = fb_range->base;
}

/*
 * Func: mmu_init_ranges_from_sysinfo
 * Desc: Initialize mmu_memranges based on the memranges obtained from the
 * coreboot tables. Also carve out the dma memrange and return it.
 */
struct mmu_memrange *mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
						  uint64_t ncb,
						  struct mmu_ranges *mmu_ranges)
{
	struct mmu_memrange *dma_range;

	/* Initialize mmu_ranges to contain no entries. */
	mmu_ranges->used = 0;

	/* Extract ranges from memrange in lib_sysinfo */
	mmu_extract_ranges(cb_ranges, ncb, mmu_ranges);

	/* Get a range for dma */
	dma_range = mmu_add_dma_range(mmu_ranges);

	/* Get a range for framebuffer */
	mmu_add_fb_range(mmu_ranges);

	if (dma_range == NULL)
		mmu_error();

	return dma_range;
}

/*
 * Func: mmu_presysinfo_memory_used
 * Desc: Records the memory used for the presysinfo page table setup and for
 * enabling the MMU. All these ranges are stored in usedmem_ranges.
 * usedmem_ranges plays an important role in selecting the dma buffer as well,
 * since we check the dma buffer range against the used memory ranges to
 * prevent any overstepping.
 */
void mmu_presysinfo_memory_used(uint64_t base, uint64_t size)
{
	uint64_t range_base;

	range_base = ALIGN_DOWN(base, GRANULE_SIZE);

	size += (base - range_base);
	size = ALIGN_UP(size, GRANULE_SIZE);

	mmu_add_memrange(&usedmem_ranges, range_base, size, TYPE_NORMAL_MEM);
}

void mmu_presysinfo_enable(void)
{
	mmu_init(&usedmem_ranges);
	mmu_enable();
}

const struct mmu_ranges *mmu_get_used_ranges(void)
{
	return &usedmem_ranges;
}