/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <boot/coreboot_tables.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <lib.h>
#include <types.h>

/* The program loader passes on cbmem_top and the program entry point
   has to fill in the _cbmem_top_ptr symbol based on the calling arguments. */
uintptr_t _cbmem_top_ptr;

static struct imd imd;

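/*
 * Return the address of the top of CBMEM. Stages that create CBMEM derive
 * it from the chipset (and cache the result); later stages such as postcar
 * and ramstage receive it from the previous stage via _cbmem_top_ptr.
 */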
uintptr_t cbmem_top(void)
{
	if (ENV_CREATES_CBMEM) {
		static uintptr_t top;
		if (top)
			return top;
		top = cbmem_top_chipset();
		return top;
	}
	if (ENV_POSTCAR || ENV_RAMSTAGE)
		return _cbmem_top_ptr;

	dead_code();
}

int cbmem_initialized;

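/*
 * A cbmem_entry is backed directly by an imd_entry, so the two helpers
 * below are simple pointer casts between the opaque handle types.
 */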
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

static void cbmem_top_init_once(void)
{
	/* Call one-time hook on expected cbmem init during boot. */
	if (!ENV_CREATES_CBMEM)
		return;

	/* The test is only effective on X86 and when the address hits UC memory. */
	if (ENV_X86)
		quick_ram_check_or_die(cbmem_top() - sizeof(u32));
}

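/*
 * Create a brand-new, empty CBMEM area at cbmem_top(). If a non-zero id/size
 * pair is given, that entry is added first, before the init hooks run.
 */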
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd_handle_init(&imd, (void *)cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(&imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);

	cbmem_initialized = 1;
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

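/*
 * Recover an already existing CBMEM area (e.g. across an S3 suspend/resume
 * cycle). Returns 0 on successful recovery, 1 if no valid area was found.
 */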
int cbmem_initialize_id_size(u32 id, u64 size)
{
	const int recovery = 1;

	cbmem_top_init_once();

	imd_handle_init(&imd, (void *)cbmem_top());

	if (imd_recover(&imd))
		return 1;

	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume path
	 * is being taken.
	 */
	if (ENV_CREATES_CBMEM)
		imd_lockdown(&imd);

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	cbmem_initialized = 1;

	/* Recovery successful. */
	return 0;
}

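/*
 * (Re)initialize CBMEM for this boot: wipe it on a normal boot, or attempt
 * to recover the existing area on a wakeup (S3 resume). Returns nonzero if
 * recovery was requested but failed.
 */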
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

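/*
 * Find an existing entry with the given id, or add a new one of the given
 * size, and return its opaque handle (NULL on failure).
 */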
const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	const struct imd_entry *e;

	e = imd_entry_find_or_add(&imd, id, size64);

	return imd_to_cbmem(e);
}

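/*
 * Like cbmem_entry_add(), but return a pointer to the entry's storage
 * instead of a handle. An illustrative use (id and size chosen only as an
 * example):
 *
 *	void *buf = cbmem_add(CBMEM_ID_CONSOLE, 8 * KiB);
 */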
void *cbmem_add(u32 id, u64 size)
{
	const struct imd_entry *e;

	e = imd_entry_find_or_add(&imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(&imd, e);
}

/* Retrieve a region provided a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	const struct imd_entry *e;

	e = imd_entry_find(&imd, id);

	return imd_to_cbmem(e);
}

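/*
 * Return a pointer to the storage of an existing entry, or NULL if no entry
 * with the given id is present.
 */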
void *cbmem_find(u32 id)
{
	const struct imd_entry *e;

	e = imd_entry_find(&imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(&imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	return imd_entry_remove(&imd, cbmem_to_imd(entry));
}

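/* Accessors for the size and start address of an existing entry. */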
u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	return imd_entry_size(cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	return imd_entry_at(&imd, cbmem_to_imd(entry));
}

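/*
 * Report the memory used by CBMEM to the bootmem allocator so the region is
 * accounted for as table memory rather than ordinary usable RAM.
 */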
void cbmem_add_bootmem(void)
{
	void *baseptr = NULL;
	size_t size = 0;

	cbmem_get_region(&baseptr, &size);
	bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
}

void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(&imd, baseptr, size);
}

#if ENV_PAYLOAD_LOADER || (CONFIG(EARLY_CBMEM_LIST) && ENV_HAS_CBMEM)
/*
 * -fdata-sections doesn't work so well on read-only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(&imd, lookup, ARRAY_SIZE(lookup));
}
#endif

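/*
 * Export every CBMEM entry (except the IMD root/small metadata entries) as
 * an LB_TAG_CBMEM_ENTRY record in the coreboot tables handed to the payload.
 */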
void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;

	if (imd_cursor_init(&imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(&imd, e);
		lbe->entry_size = imd_entry_size(e);
		lbe->id = id;
	}
}