xref: /aosp_15_r20/external/coreboot/src/lib/imd.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c)
/* SPDX-License-Identifier: GPL-2.0-only */
2 
3 #include <assert.h>
4 #include <cbmem.h>
5 #include <console/console.h>
6 #include <imd.h>
7 #include <string.h>
8 #include <types.h>
9 #include <imd_private.h>
10 
11 
/* For more details on implementation and usage please see the imd.h header. */
13 
/* Compute base + offset via integer arithmetic (offset may be negative). */
static void *relative_pointer(void *base, ssize_t offset)
{
	intptr_t addr = (intptr_t)base + offset;
	return (void *)addr;
}
20 
imd_root_pointer_valid(const struct imd_root_pointer * rp)21 static bool imd_root_pointer_valid(const struct imd_root_pointer *rp)
22 {
23 	return !!(rp->magic == IMD_ROOT_PTR_MAGIC);
24 }
25 
imdr_root(const struct imdr * imdr)26 static struct imd_root *imdr_root(const struct imdr *imdr)
27 {
28 	return imdr->r;
29 }
30 
31 /*
32  * The root pointer is relative to the upper limit of the imd. i.e. It sits
33  * just below the upper limit.
34  */
imdr_get_root_pointer(const struct imdr * imdr)35 static struct imd_root_pointer *imdr_get_root_pointer(const struct imdr *imdr)
36 {
37 	struct imd_root_pointer *rp;
38 
39 	rp = relative_pointer((void *)imdr->limit, -sizeof(*rp));
40 
41 	return rp;
42 }
43 
imd_link_root(struct imd_root_pointer * rp,struct imd_root * r)44 static void imd_link_root(struct imd_root_pointer *rp, struct imd_root *r)
45 {
46 	rp->magic = IMD_ROOT_PTR_MAGIC;
47 	rp->root_offset = (int32_t)((intptr_t)r - (intptr_t)rp);
48 }
49 
root_last_entry(struct imd_root * r)50 static struct imd_entry *root_last_entry(struct imd_root *r)
51 {
52 	return &r->entries[r->num_entries - 1];
53 }
54 
root_num_entries(size_t root_size)55 static size_t root_num_entries(size_t root_size)
56 {
57 	size_t entries_size;
58 
59 	entries_size = root_size;
60 	entries_size -= sizeof(struct imd_root_pointer);
61 	entries_size -= sizeof(struct imd_root);
62 
63 	return entries_size / sizeof(struct imd_entry);
64 }
65 
imd_root_data_left(struct imd_root * r)66 static size_t imd_root_data_left(struct imd_root *r)
67 {
68 	struct imd_entry *last_entry;
69 
70 	last_entry = root_last_entry(r);
71 
72 	if (r->max_offset != 0)
73 		return last_entry->start_offset - r->max_offset;
74 
75 	return ~(size_t)0;
76 }
77 
root_is_locked(const struct imd_root * r)78 static bool root_is_locked(const struct imd_root *r)
79 {
80 	return !!(r->flags & IMD_FLAG_LOCKED);
81 }
82 
imd_entry_assign(struct imd_entry * e,uint32_t id,ssize_t offset,size_t size)83 static void imd_entry_assign(struct imd_entry *e, uint32_t id,
84 				ssize_t offset, size_t size)
85 {
86 	e->magic = IMD_ENTRY_MAGIC;
87 	e->start_offset = offset;
88 	e->size = size;
89 	e->id = id;
90 }
91 
imdr_init(struct imdr * ir,void * upper_limit)92 static void imdr_init(struct imdr *ir, void *upper_limit)
93 {
94 	uintptr_t limit = (uintptr_t)upper_limit;
95 	/* Upper limit is aligned down to 4KiB */
96 	ir->limit = ALIGN_DOWN(limit, LIMIT_ALIGN);
97 	ir->r = NULL;
98 }
99 
/*
 * Create an empty imd occupying root_size bytes just below imdr->limit:
 * a root pointer at the very top, the root bookkeeping structure below it,
 * and an entry table whose first entry covers the root region itself.
 * Returns 0 on success, -1 on invalid parameters or uninitialized imdr.
 */
static int imdr_create_empty(struct imdr *imdr, size_t root_size,
				size_t entry_align)
{
	struct imd_root_pointer *rp;
	struct imd_root *r;
	struct imd_entry *e;
	ssize_t root_offset;

	if (!imdr->limit)
		return -1;

	/* root_size and entry_align should be a power of 2. */
	assert(IS_POWER_OF_2(root_size));
	assert(IS_POWER_OF_2(entry_align));

	/*
	 * root_size needs to be large enough to accommodate root pointer and
	 * root book keeping structure. Furthermore, there needs to be a space
	 * for at least one entry covering root region. The caller needs to
	 * ensure there's enough room for tracking individual allocations.
	 */
	if (root_size < (sizeof(*rp) + sizeof(*r) + sizeof(*e)))
		return -1;

	/* For simplicity don't allow sizes or alignments to exceed LIMIT_ALIGN.
	 */
	if (root_size > LIMIT_ALIGN || entry_align > LIMIT_ALIGN)
		return -1;

	/* Additionally, don't handle an entry alignment > root_size. */
	if (entry_align > root_size)
		return -1;

	rp = imdr_get_root_pointer(imdr);

	root_offset = -(ssize_t)root_size;
	/* Set root pointer. */
	imdr->r = relative_pointer((void *)imdr->limit, root_offset);
	r = imdr_root(imdr);
	imd_link_root(rp, r);

	memset(r, 0, sizeof(*r));
	r->entry_align = entry_align;

	/* Calculate size left for entries. */
	r->max_entries = root_num_entries(root_size);

	/* Fill in first entry covering the root region. */
	r->num_entries = 1;
	e = &r->entries[0];
	imd_entry_assign(e, CBMEM_ID_IMD_ROOT, 0, root_size);

	printk(BIOS_DEBUG, "IMD: root @ %p %u entries.\n", r, r->max_entries);

	return 0;
}
156 
/*
 * Validate and adopt a previously created imd below imdr->limit. Checks the
 * root pointer magic, the root's layout and entry count, and that every
 * entry's data lies between the allocation floor and the upper limit.
 * Returns 0 and sets imdr->r on success, -1 otherwise.
 */
static int imdr_recover(struct imdr *imdr)
{
	struct imd_root_pointer *rp;
	struct imd_root *r;
	uintptr_t low_limit;
	size_t i;

	if (!imdr->limit)
		return -1;

	rp = imdr_get_root_pointer(imdr);

	if (!imd_root_pointer_valid(rp))
		return -1;

	r = relative_pointer(rp, rp->root_offset);

	/* Ensure that root is just under the root pointer */
	if ((intptr_t)rp - (intptr_t)&r->entries[r->max_entries] > sizeof(struct imd_entry))
		return -1;

	if (r->num_entries > r->max_entries)
		return -1;

	/* Entry alignment should be power of 2. */
	if (!IS_POWER_OF_2(r->entry_align))
		return -1;

	low_limit = (uintptr_t)relative_pointer(r, r->max_offset);

	/* If no max_offset then lowest limit is 0. */
	if (low_limit == (uintptr_t)r)
		low_limit = 0;

	for (i = 0; i < r->num_entries; i++) {
		uintptr_t start_addr;
		const struct imd_entry *e = &r->entries[i];

		/* Every live entry must carry the entry magic. */
		if (e->magic != IMD_ENTRY_MAGIC)
			return -1;

		/* Entry data must fall within [low_limit, imdr->limit). */
		start_addr = (uintptr_t)relative_pointer(r, e->start_offset);
		if (start_addr  < low_limit)
			return -1;
		if (start_addr >= imdr->limit ||
				(start_addr + e->size) > imdr->limit)
			return -1;
	}

	/* Set root pointer. */
	imdr->r = r;

	return 0;
}
211 
imdr_entry_find(const struct imdr * imdr,uint32_t id)212 static const struct imd_entry *imdr_entry_find(const struct imdr *imdr,
213 						uint32_t id)
214 {
215 	struct imd_root *r;
216 	struct imd_entry *e;
217 	size_t i;
218 
219 	r = imdr_root(imdr);
220 
221 	if (r == NULL)
222 		return NULL;
223 
224 	e = NULL;
225 	/* Skip first entry covering the root. */
226 	for (i = 1; i < r->num_entries; i++) {
227 		if (id != r->entries[i].id)
228 			continue;
229 		e = &r->entries[i];
230 		break;
231 	}
232 
233 	return e;
234 }
235 
imdr_limit_size(struct imdr * imdr,size_t max_size)236 static int imdr_limit_size(struct imdr *imdr, size_t max_size)
237 {
238 	struct imd_root *r;
239 	ssize_t smax_size;
240 	size_t root_size;
241 
242 	r = imdr_root(imdr);
243 	if (r == NULL)
244 		return -1;
245 
246 	root_size = imdr->limit - (uintptr_t)r;
247 
248 	if (max_size < root_size)
249 		return -1;
250 
251 	/* Take into account the root size. */
252 	smax_size = max_size - root_size;
253 	smax_size = -smax_size;
254 
255 	r->max_offset = smax_size;
256 
257 	return 0;
258 }
259 
imdr_entry_size(const struct imd_entry * e)260 static size_t imdr_entry_size(const struct imd_entry *e)
261 {
262 	return e->size;
263 }
264 
imdr_entry_at(const struct imdr * imdr,const struct imd_entry * e)265 static void *imdr_entry_at(const struct imdr *imdr, const struct imd_entry *e)
266 {
267 	return relative_pointer(imdr_root(imdr), e->start_offset);
268 }
269 
/*
 * Carve a new entry of the given size out of the space below the last
 * entry, honoring the root's entry alignment. Returns the new entry, or
 * NULL when the table is full, space is exhausted, or the offset would
 * wrap.
 */
static struct imd_entry *imd_entry_add_to_root(struct imd_root *r, uint32_t id,
						size_t size)
{
	struct imd_entry *entry;
	struct imd_entry *last_entry;
	ssize_t e_offset;
	size_t used_size;

	if (r->num_entries == r->max_entries)
		return NULL;

	/* Determine total size taken up by entry. */
	used_size = ALIGN_UP(size, r->entry_align);

	/* See if size overflows imd total size. */
	if (used_size > imd_root_data_left(r))
		return NULL;

	/*
	 * Determine if offset field overflows. All offsets should be lower
	 * than the previous one.
	 */
	last_entry = root_last_entry(r);
	e_offset = last_entry->start_offset;
	e_offset -= (ssize_t)used_size;
	if (e_offset >= last_entry->start_offset)
		return NULL;

	/* Append the new entry directly after the current last one. */
	entry = root_last_entry(r) + 1;
	r->num_entries++;

	imd_entry_assign(entry, id, e_offset, size);

	return entry;
}
305 
/* Add an entry to the imdr's root, refusing when absent or locked. */
static const struct imd_entry *imdr_entry_add(const struct imdr *imdr,
						uint32_t id, size_t size)
{
	struct imd_root *r = imdr_root(imdr);

	if (r == NULL || root_is_locked(r))
		return NULL;

	return imd_entry_add_to_root(r, id, size);
}
321 
imdr_has_entry(const struct imdr * imdr,const struct imd_entry * e)322 static bool imdr_has_entry(const struct imdr *imdr, const struct imd_entry *e)
323 {
324 	struct imd_root *r;
325 	size_t idx;
326 
327 	r = imdr_root(imdr);
328 	if (r == NULL)
329 		return false;
330 
331 	/* Determine if the entry is within this root structure. */
332 	idx = e - &r->entries[0];
333 	if (idx >= r->num_entries)
334 		return false;
335 
336 	return true;
337 }
338 
imd_entry_to_imdr(const struct imd * imd,const struct imd_entry * entry)339 static const struct imdr *imd_entry_to_imdr(const struct imd *imd,
340 						const struct imd_entry *entry)
341 {
342 	if (imdr_has_entry(&imd->lg, entry))
343 		return &imd->lg;
344 
345 	if (imdr_has_entry(&imd->sm, entry))
346 		return &imd->sm;
347 
348 	return NULL;
349 }
350 
351 /* Initialize imd handle. */
imd_handle_init(struct imd * imd,void * upper_limit)352 void imd_handle_init(struct imd *imd, void *upper_limit)
353 {
354 	imdr_init(&imd->lg, upper_limit);
355 	imdr_init(&imd->sm, NULL);
356 }
357 
/*
 * Partially recover the imd: link the large region's root without any
 * validation, then, if a small-region entry exists, link its root as well.
 * Unlike imd_recover(), no consistency checks are performed.
 */
void imd_handle_init_partial_recovery(struct imd *imd)
{
	const struct imd_entry *e;
	struct imd_root_pointer *rp;
	struct imdr *imdr;

	if (imd->lg.limit == 0)
		return;

	imd_handle_init(imd, (void *)imd->lg.limit);

	/* Initialize root pointer for the large regions. */
	imdr = &imd->lg;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);

	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return;

	/* The small region's upper limit is the end of its backing entry. */
	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += imdr_entry_size(e);
	imdr = &imd->sm;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);
}
385 
imd_create_empty(struct imd * imd,size_t root_size,size_t entry_align)386 int imd_create_empty(struct imd *imd, size_t root_size, size_t entry_align)
387 {
388 	return imdr_create_empty(&imd->lg, root_size, entry_align);
389 }
390 
/*
 * Create a two-tier imd: a large region, plus a small region carved out of
 * one large-region entry for fine-grained allocations. On any failure the
 * handle is re-initialized to an empty state and -1 is returned.
 */
int imd_create_tiered_empty(struct imd *imd,
				size_t lg_root_size, size_t lg_entry_align,
				size_t sm_root_size, size_t sm_entry_align)
{
	size_t sm_region_size;
	const struct imd_entry *e;
	struct imdr *imdr;

	imdr = &imd->lg;

	if (imdr_create_empty(imdr, lg_root_size, lg_entry_align) != 0)
		return -1;

	/* Calculate the size of the small region to request. */
	sm_region_size = root_num_entries(sm_root_size) * sm_entry_align;
	sm_region_size += sm_root_size;
	sm_region_size = ALIGN_UP(sm_region_size, lg_entry_align);

	/* Add a new entry to the large region to cover the root and entries. */
	e = imdr_entry_add(imdr, SMALL_REGION_ID, sm_region_size);

	if (e == NULL)
		goto fail;

	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += sm_region_size;

	/* Cap the small region so it cannot outgrow its backing entry. */
	if (imdr_create_empty(&imd->sm, sm_root_size, sm_entry_align) != 0 ||
		imdr_limit_size(&imd->sm, sm_region_size))
		goto fail;

	return 0;
fail:
	imd_handle_init(imd, (void *)imdr->limit);
	return -1;
}
427 
/*
 * Recover a previously created imd, validating the large region and, when
 * present, the small region. On small-region failure the handle is reset
 * to an empty state. Returns 0 on success, -1 otherwise.
 */
int imd_recover(struct imd *imd)
{
	const struct imd_entry *e;
	uintptr_t small_upper_limit;
	struct imdr *imdr;

	imdr = &imd->lg;
	if (imdr_recover(imdr) != 0)
		return -1;

	/* Determine if small region is present. */
	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return 0;

	/* Small region ends at the end of its backing large-region entry. */
	small_upper_limit = (uintptr_t)imdr_entry_at(imdr, e);
	small_upper_limit += imdr_entry_size(e);

	imd->sm.limit = small_upper_limit;

	/* Tear down any changes on failure. */
	if (imdr_recover(&imd->sm) != 0) {
		imd_handle_init(imd, (void *)imd->lg.limit);
		return -1;
	}

	return 0;
}
457 
imd_limit_size(struct imd * imd,size_t max_size)458 int imd_limit_size(struct imd *imd, size_t max_size)
459 {
460 	return imdr_limit_size(&imd->lg, max_size);
461 }
462 
imd_lockdown(struct imd * imd)463 int imd_lockdown(struct imd *imd)
464 {
465 	struct imd_root *r;
466 
467 	r = imdr_root(&imd->lg);
468 	if (r == NULL)
469 		return -1;
470 
471 	r->flags |= IMD_FLAG_LOCKED;
472 
473 	r = imdr_root(&imd->sm);
474 	if (r != NULL)
475 		r->flags |= IMD_FLAG_LOCKED;
476 
477 	return 0;
478 }
479 
imd_region_used(struct imd * imd,void ** base,size_t * size)480 int imd_region_used(struct imd *imd, void **base, size_t *size)
481 {
482 	struct imd_root *r;
483 	struct imd_entry *e;
484 	void *low_addr;
485 	size_t sz_used;
486 
487 	if (!imd->lg.limit)
488 		return -1;
489 
490 	r = imdr_root(&imd->lg);
491 
492 	if (r == NULL)
493 		return -1;
494 
495 	/* Use last entry to obtain lowest address. */
496 	e = root_last_entry(r);
497 
498 	low_addr = relative_pointer(r, e->start_offset);
499 
500 	/* Total size used is the last entry's base up to the limit. */
501 	sz_used = imd->lg.limit - (uintptr_t)low_addr;
502 
503 	*base = low_addr;
504 	*size = sz_used;
505 
506 	return 0;
507 }
508 
imd_entry_add(const struct imd * imd,uint32_t id,size_t size)509 const struct imd_entry *imd_entry_add(const struct imd *imd, uint32_t id,
510 					size_t size)
511 {
512 	struct imd_root *r;
513 	const struct imdr *imdr;
514 	const struct imd_entry *e = NULL;
515 
516 	/*
517 	 * Determine if requested size is less than 1/4 of small data
518 	 * region is left.
519 	 */
520 	imdr = &imd->sm;
521 	r = imdr_root(imdr);
522 
523 	/* No small region. Use the large region. */
524 	if (r == NULL)
525 		return imdr_entry_add(&imd->lg, id, size);
526 	else if (size <= r->entry_align || size <= imd_root_data_left(r) / 4)
527 		e = imdr_entry_add(imdr, id, size);
528 
529 	/* Fall back on large region allocation. */
530 	if (e == NULL)
531 		e = imdr_entry_add(&imd->lg, id, size);
532 
533 	return e;
534 }
535 
imd_entry_find(const struct imd * imd,uint32_t id)536 const struct imd_entry *imd_entry_find(const struct imd *imd, uint32_t id)
537 {
538 	const struct imd_entry *e;
539 
540 	/* Many of the smaller allocations are used a lot. Therefore, try
541 	 * the small region first. */
542 	e = imdr_entry_find(&imd->sm, id);
543 
544 	if (e == NULL)
545 		e = imdr_entry_find(&imd->lg, id);
546 
547 	return e;
548 }
549 
/* Return the existing entry for id, creating one of size bytes if absent. */
const struct imd_entry *imd_entry_find_or_add(const struct imd *imd,
						uint32_t id, size_t size)
{
	const struct imd_entry *existing = imd_entry_find(imd, id);

	if (existing == NULL)
		return imd_entry_add(imd, id, size);

	return existing;
}
562 
/* Size in bytes backing the given entry. */
size_t imd_entry_size(const struct imd_entry *entry)
{
	return imdr_entry_size(entry);
}
567 
/* Translate an entry to its data address; NULL when the entry is unknown. */
void *imd_entry_at(const struct imd *imd, const struct imd_entry *entry)
{
	const struct imdr *owner = imd_entry_to_imdr(imd, entry);

	return (owner == NULL) ? NULL : imdr_entry_at(owner, entry);
}
579 
imd_entry_id(const struct imd_entry * entry)580 uint32_t imd_entry_id(const struct imd_entry *entry)
581 {
582 	return entry->id;
583 }
584 
imd_entry_remove(const struct imd * imd,const struct imd_entry * entry)585 int imd_entry_remove(const struct imd *imd, const struct imd_entry *entry)
586 {
587 	struct imd_root *r;
588 	const struct imdr *imdr;
589 
590 	imdr = imd_entry_to_imdr(imd, entry);
591 
592 	if (imdr == NULL)
593 		return -1;
594 
595 	r = imdr_root(imdr);
596 
597 	if (root_is_locked(r))
598 		return -1;
599 
600 	if (entry != root_last_entry(r))
601 		return -1;
602 
603 	/* Don't remove entry covering root region */
604 	if (r->num_entries == 1)
605 		return -1;
606 
607 	r->num_entries--;
608 
609 	return 0;
610 }
611 
/*
 * Print every entry in the imdr, resolving ids to names through the lookup
 * table when possible. Each line shows the name (or raw id), index,
 * address, and size of the entry.
 */
static void imdr_print_entries(const struct imdr *imdr, const char *indent,
				const struct imd_lookup *lookup, size_t size)
{
	struct imd_root *r;
	size_t i;
	size_t j;

	if (imdr == NULL)
		return;

	r = imdr_root(imdr);

	for (i = 0; i < r->num_entries; i++) {
		const char *name = NULL;
		const struct imd_entry *e = &r->entries[i];

		/* Translate the id into a human-readable name, if known. */
		for (j = 0; j < size; j++) {
			if (lookup[j].id == e->id) {
				name = lookup[j].name;
				break;
			}
		}

		printk(BIOS_DEBUG, "%s", indent);

		if (name == NULL)
			printk(BIOS_DEBUG, "%08x   ", e->id);
		else
			printk(BIOS_DEBUG, "%s", name);
		printk(BIOS_DEBUG, "%2zu. ", i);
		printk(BIOS_DEBUG, "%p ", imdr_entry_at(imdr, e));
		printk(BIOS_DEBUG, "0x%08zx\n", imdr_entry_size(e));
	}
}
646 
imd_print_entries(const struct imd * imd,const struct imd_lookup * lookup,size_t size)647 int imd_print_entries(const struct imd *imd, const struct imd_lookup *lookup,
648 			size_t size)
649 {
650 	if (imdr_root(&imd->lg) == NULL)
651 		return -1;
652 
653 	imdr_print_entries(&imd->lg, "", lookup, size);
654 	if (imdr_root(&imd->sm) != NULL) {
655 		printk(BIOS_DEBUG, "IMD small region:\n");
656 		imdr_print_entries(&imd->sm, "  ", lookup, size);
657 	}
658 
659 	return 0;
660 }
661 
imd_cursor_init(const struct imd * imd,struct imd_cursor * cursor)662 int imd_cursor_init(const struct imd *imd, struct imd_cursor *cursor)
663 {
664 	if (imd == NULL || cursor == NULL)
665 		return -1;
666 
667 	memset(cursor, 0, sizeof(*cursor));
668 
669 	cursor->imdr[0] = &imd->lg;
670 	cursor->imdr[1] = &imd->sm;
671 
672 	return 0;
673 }
674 
imd_cursor_next(struct imd_cursor * cursor)675 const struct imd_entry *imd_cursor_next(struct imd_cursor *cursor)
676 {
677 	struct imd_root *r;
678 	const struct imd_entry *e;
679 
680 	if (cursor->current_imdr >= ARRAY_SIZE(cursor->imdr))
681 		return NULL;
682 
683 	r = imdr_root(cursor->imdr[cursor->current_imdr]);
684 
685 	if (r == NULL)
686 		return NULL;
687 
688 	if (cursor->current_entry >= r->num_entries) {
689 		/* Try next imdr. */
690 		cursor->current_imdr++;
691 		cursor->current_entry = 0;
692 		return imd_cursor_next(cursor);
693 	}
694 
695 	e = &r->entries[cursor->current_entry];
696 	cursor->current_entry++;
697 
698 	return e;
699 }
700