/* SPDX-License-Identifier: GPL-2.0-only */

#include <bootmem.h>
#include <commonlib/coreboot_tables.h>
#include <device/device.h>
#include <memrange.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <tests/test.h>

/* Stubs defined to satisfy linker dependencies */
void cbmem_add_bootmem(void)
{
}

void bootmem_arch_add_ranges(void)
{
}

struct bootmem_ranges_t {
	uint64_t start;
	uint64_t size;
	uint32_t type;
};

/* Define symbols for the regions required by bootmem.
   Define constants for regions that do not need to exist in the executable.
   No region memory is needed; only the start, end and size symbols are required.
   Only the values actually used by the tests are defined. */
#define ZERO_REGION_START ((uintptr_t)0x0)
#define ZERO_REGION_SIZE  ((uintptr_t)0x10000)

TEST_REGION_UNALLOCATED(program, 0x10000000, 0x40000);
#define PROGRAM_START ((uintptr_t)_program)
#define PROGRAM_SIZE  REGION_SIZE(program)

#define CACHEABLE_START	((uintptr_t)0x10000000ULL)
#define CACHEABLE_SIZE	((uintptr_t)0x100000000ULL)
#define CACHEABLE_END	((uintptr_t)(CACHEABLE_START + CACHEABLE_SIZE))

/* Stack region end address is hardcoded because `<const> - <symbol>` does not work in GCC */
TEST_REGION_UNALLOCATED(stack, 0x10040000, 0x1000);
#define STACK_START ((uintptr_t)_stack)
#define STACK_SIZE  REGION_SIZE(stack)
#define STACK_END   ((uintptr_t)(0x10040000 + 0x1000))

#define RESERVED_START	((uintptr_t)0x100000000ULL)
#define RESERVED_SIZE	((uintptr_t)0x100000)
#define RESERVED_END	((uintptr_t)(RESERVED_START + RESERVED_SIZE))

TEST_REGION_UNALLOCATED(ramstage, 0x10000000, 0x41000);
#define RAMSTAGE_START  ((uintptr_t)_ramstage)
#define RAMSTAGE_SIZE	REGION_SIZE(ramstage)

#define CACHEABLE_START_TO_RESERVED_START_SIZE (RESERVED_START - CACHEABLE_START)
#define RESERVED_END_TO_CACHEABLE_END_SIZE (CACHEABLE_END - RESERVED_END)
#define STACK_END_TO_RESERVED_START_SIZE (RESERVED_START - STACK_END)


/* Bootmem layout for tests
 *
 * Regions marked with asterisks (***) are not visible to the OS
 *
 *     +------------------ZERO-----------------+ <-0x0
 *     |                                       |
 *     +---------------------------------------+ <-0x10000
 *
 *     +-------+----CACHEABLE_MEMORY---------+-+ <-0x10000000
 *     |       |        ***PROGRAM***        | |
 *     |       +-----------------------------+ | <-0x10040000
 *     |       |         ***STACK***         | |
 *     |       +-----------------------------+ | <-0x10041000
 *     |                                       |
 *     |                                       |
 *     |                                       |
 *     |       +-------RESERVED_MEMORY-------+ | <-0x100000000
 *     |       |                             | |
 *     |       |                             | |
 *     |       |                             | |
 *     |       +-----------------------------+ | <-0x100100000
 *     |                                       |
 *     |                                       |
 *     +---------------------------------------+ <-0x110000000
 *
 * Ramstage covers the PROGRAM and STACK regions.
 */
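/* Expected OS-visible memory map entries, as returned by bootmem_walk_os_mem()
   and written into the coreboot table */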
struct bootmem_ranges_t os_ranges_mock[] = {
	[0] = { .start = ZERO_REGION_START, .size = ZERO_REGION_SIZE,
		.type = BM_MEM_RAM },
	[1] = { .start = CACHEABLE_START, .size = CACHEABLE_START_TO_RESERVED_START_SIZE,
		.type = BM_MEM_RAM },
	[2] = { .start = RESERVED_START, .size = RESERVED_SIZE,
		.type = BM_MEM_RESERVED },
	[3] = { .start = RESERVED_END, .size = RESERVED_END_TO_CACHEABLE_END_SIZE,
		.type = BM_MEM_RAM },
};

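/* Expected entries of the full bootmem map, as returned by bootmem_walk() */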
struct bootmem_ranges_t ranges_mock[] = {
	[0] = { .start = ZERO_REGION_START, .size = ZERO_REGION_SIZE,
		.type = BM_MEM_RAM },
	[1] = { .start = RAMSTAGE_START, .size = RAMSTAGE_SIZE,
		.type = BM_MEM_RAMSTAGE },
	[2] = { .start = STACK_END, .size = STACK_END_TO_RESERVED_START_SIZE,
		.type = BM_MEM_RAM },
	[3] = { .start = RESERVED_START, .size = RESERVED_SIZE,
		.type = BM_MEM_RESERVED },
	[4] = { .start = RESERVED_END, .size = RESERVED_END_TO_CACHEABLE_END_SIZE,
		.type = BM_MEM_RAM },
};

struct bootmem_ranges_t *os_ranges = os_ranges_mock;
struct bootmem_ranges_t *ranges = ranges_mock;

/* Note that the third resource (RESERVED) lies within the second one (CACHEABLE) */
struct resource res_mock[] = {
	{ .base = ZERO_REGION_START, .size = ZERO_REGION_SIZE, .next = &res_mock[1],
	  .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM | IORESOURCE_ASSIGNED },
	{ .base = CACHEABLE_START, .size = CACHEABLE_SIZE, .next = &res_mock[2],
	  .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM | IORESOURCE_ASSIGNED },
	{ .base = RESERVED_START, .size = RESERVED_SIZE, .next = NULL,
	  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM | IORESOURCE_ASSIGNED }
};

/* Device simulating RAM */
struct device mem_device_mock = {
	.enabled = 1,
	.resource_list = res_mock,
	.next = NULL
};

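/* bootmem builds its memory map from the resources attached to all_devices,
   so point it at the mock device above. */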
struct device *all_devices = &mem_device_mock;

/* Simplified version for the purpose of tests */
static uint32_t bootmem_to_lb_tag(const enum bootmem_type tag)
{
	switch (tag) {
	case BM_MEM_RAM:
		return LB_MEM_RAM;
	case BM_MEM_RESERVED:
		return LB_MEM_RESERVED;
	default:
		return LB_MEM_RESERVED;
	}
}

static void test_bootmem_write_mem_table(void **state)
{
	/* Space for 10 lb_mem entries to be safe */
	const size_t lb_mem_max_size = sizeof(struct lb_memory)
					+ 10 * sizeof(struct lb_memory_range);
	const size_t expected_allocation_size =
			(sizeof(struct lb_memory)
				+ ARRAY_SIZE(os_ranges_mock) * sizeof(struct lb_memory_range));
	const size_t required_unused_space_size = lb_mem_max_size - expected_allocation_size;
	int i;
	struct lb_memory *lb_mem;
	/* Allocate a reference buffer filled with a sentinel value. It is used to verify
	   that bootmem_write_memory_table() does not write past the expected size. */
	u8 sentinel_value_buffer[required_unused_space_size];
	memset(sentinel_value_buffer, 0x77, required_unused_space_size);

	lb_mem = malloc(lb_mem_max_size);
	lb_mem->tag = LB_TAG_MEMORY;
	lb_mem->size = sizeof(*lb_mem);
	/* Fill the rest of the buffer with the sentinel value */
	memset(((u8 *)lb_mem) + expected_allocation_size, 0x77, required_unused_space_size);

	bootmem_write_memory_table(lb_mem);

	/* Only the `os_ranges_mock` entries should be visible in the coreboot table */
	assert_int_equal(lb_mem->size, sizeof(*lb_mem) +
			ARRAY_SIZE(os_ranges_mock) * sizeof(struct lb_memory_range));
	assert_memory_equal(sentinel_value_buffer,
			((u8 *)lb_mem) + expected_allocation_size,
			required_unused_space_size);

	for (i = 0; i < lb_mem->size / sizeof(struct lb_memory_range); i++) {
		assert_int_equal(lb_mem->map[i].start, os_ranges[i].start);
		assert_int_equal(lb_mem->map[i].size, os_ranges[i].size);
		assert_int_equal(lb_mem->map[i].type, bootmem_to_lb_tag(os_ranges[i].type));
	}

	free(lb_mem);
}

int os_bootmem_walk_cnt;
int bootmem_walk_cnt;

static bool verify_os_bootmem_walk(const struct range_entry *r, void *arg)
{
	assert_int_equal(range_entry_base(r), os_ranges[os_bootmem_walk_cnt].start);
	assert_int_equal(range_entry_size(r), os_ranges[os_bootmem_walk_cnt].size);
	assert_int_equal(range_entry_tag(r), os_ranges[os_bootmem_walk_cnt].type);

	os_bootmem_walk_cnt++;

	return true;
}

static bool verify_bootmem_walk(const struct range_entry *r, void *arg)
{
	assert_int_equal(range_entry_base(r), ranges[bootmem_walk_cnt].start);
	assert_int_equal(range_entry_size(r), ranges[bootmem_walk_cnt].size);
	assert_int_equal(range_entry_tag(r), ranges[bootmem_walk_cnt].type);

	bootmem_walk_cnt++;

	return true;
}

static bool count_entries_os_bootmem_walk(const struct range_entry *r, void *arg)
{
	os_bootmem_walk_cnt++;

	return true;
}

static bool count_entries_bootmem_walk(const struct range_entry *r, void *arg)
{
	bootmem_walk_cnt++;

	return true;
}

/* This function initializes bootmem using bootmem_write_memory_table().
   bootmem_init() is not accessible directly because it is static. */
static void init_memory_table_library(void)
{
	struct lb_memory *lb_mem;

	/* Allocate space for 10 lb_mem entries to be safe */
	lb_mem = malloc(sizeof(*lb_mem) + 10 * sizeof(struct lb_memory_range));
	lb_mem->tag = LB_TAG_MEMORY;
	lb_mem->size = sizeof(*lb_mem);

	/* We need to call this only to initialize the library */
	bootmem_write_memory_table(lb_mem);

	free(lb_mem);
}

static void test_bootmem_add_range(void **state)
{
	init_memory_table_library();

	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(count_entries_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	bootmem_walk_cnt = 0;
	bootmem_walk(count_entries_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 5);

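	/* Adding an OS-visible type (BM_MEM_ACPI) after the memory table has already
	   been written out is expected to trigger an assert in bootmem_add_range() */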
	expect_assert_failure(
		bootmem_add_range(ALIGN_UP(PROGRAM_START, 4096),
				  ALIGN_DOWN(PROGRAM_SIZE / 2, 4096),
				  BM_MEM_ACPI)
	);

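	/* The failed call still left one extra entry in bootmem (5 -> 6),
	   while the OS-visible map is unchanged (still 4 entries) */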
	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(count_entries_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	bootmem_walk_cnt = 0;
	bootmem_walk(count_entries_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 6);

	/* No assert failure expected, as BM_MEM_RAMSTAGE is not added to os_bootmem */
	bootmem_add_range(ALIGN_UP(STACK_END + 4096, 4096),
			  ALIGN_DOWN(STACK_END_TO_RESERVED_START_SIZE / 2, 4096),
			  BM_MEM_RAMSTAGE);

	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(count_entries_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	/* Two entries are added because the new range falls in the middle of an
	   existing one, splitting it */
	bootmem_walk_cnt = 0;
	bootmem_walk(count_entries_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 8);
}

static void test_bootmem_walk(void **state)
{
	init_memory_table_library();

	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(verify_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	bootmem_walk_cnt = 0;
	bootmem_walk(verify_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 5);
}

static void test_bootmem_region_targets_type(void **state)
{
	int ret;
	u64 subregion_start;
	u64 subregion_size;

	init_memory_table_library();

	/* Single whole region */
	ret = bootmem_region_targets_type(RAMSTAGE_START, RAMSTAGE_SIZE, BM_MEM_RAMSTAGE);
	assert_int_equal(ret, 1);

	/* Expect failure because of an incorrect bootmem_type */
	ret = bootmem_region_targets_type(RAMSTAGE_START, RAMSTAGE_SIZE, BM_MEM_RESERVED);
	assert_int_equal(ret, 0);

	/* Range covering one byte more than the region */
	ret = bootmem_region_targets_type(RAMSTAGE_START, RAMSTAGE_SIZE + 1, BM_MEM_RAMSTAGE);
	assert_int_equal(ret, 0);

	/* Expect success for a subregion of ramstage stretching from a point in the program
	   range to a point in the stack range. */
	subregion_start = PROGRAM_START + PROGRAM_SIZE / 4;
	subregion_size = STACK_END - STACK_SIZE / 4 - subregion_start;
	ret = bootmem_region_targets_type(subregion_start, subregion_size, BM_MEM_RAMSTAGE);
	assert_int_equal(ret, 1);

	/* Expect failure for a range covering more than one tag; there is no BM_MEM_CACHEABLE */
	subregion_start = STACK_START + STACK_SIZE / 2;
	subregion_size = RESERVED_START + RESERVED_SIZE / 4 * 3 - subregion_start;
	ret = bootmem_region_targets_type(subregion_start, subregion_size, BM_MEM_RAM);
	assert_int_equal(ret, 0);

	/* The middle of a range should not fail */
	ret = bootmem_region_targets_type(RESERVED_START + RESERVED_SIZE / 4,
					  RESERVED_SIZE / 2, BM_MEM_RESERVED);
	assert_int_equal(ret, 1);

	/* Subsection of a range bordering its end edge */
	ret = bootmem_region_targets_type(RESERVED_END + RESERVED_END_TO_CACHEABLE_END_SIZE / 2,
					  RESERVED_END_TO_CACHEABLE_END_SIZE / 2, BM_MEM_RAM);
	assert_int_equal(ret, 1);

	/* Region touching zero */
	ret = bootmem_region_targets_type(ZERO_REGION_START, ZERO_REGION_SIZE, BM_MEM_RAM);
	assert_int_equal(ret, 1);

	/* Expect failure when passing zero as the size */
	ret = bootmem_region_targets_type(ZERO_REGION_START, 0, BM_MEM_RAM);
	assert_int_equal(ret, 0);
	ret = bootmem_region_targets_type(RESERVED_START, 0, BM_MEM_RESERVED);
	assert_int_equal(ret, 0);
}

/* Action function used to check alignment of size and base of allocated ranges */
static bool verify_bootmem_allocate_buffer(const struct range_entry *r, void *arg)
{
	if (range_entry_tag(r) == BM_MEM_PAYLOAD) {
		assert_true(IS_ALIGNED(range_entry_base(r), 4096));
		assert_true(IS_ALIGNED(range_entry_size(r), 4096));
	}

	return true;
}


static void test_bootmem_allocate_buffer(void **state)
{
	void *buf;
	void *prev;

	init_memory_table_library();

	/* All allocated buffers should be below the 32-bit boundary */
	buf = bootmem_allocate_buffer(1ULL << 32);
	assert_null(buf);

	/* Try a size too big for our BM_MEM_RAM range below the 32-bit boundary */
	buf = bootmem_allocate_buffer(RESERVED_START - PROGRAM_START);
	assert_null(buf);

	/* Two working cases */
	buf = bootmem_allocate_buffer(0xE0000000);
	assert_non_null(buf);
	assert_int_equal(1, bootmem_region_targets_type((uintptr_t)buf,
							0xE0000000, BM_MEM_PAYLOAD));
	assert_in_range((uintptr_t)buf, CACHEABLE_START + RAMSTAGE_SIZE, RESERVED_START);
	/* Check that allocated (payload) ranges have their base and size aligned */
	bootmem_walk(verify_bootmem_allocate_buffer, NULL);

	prev = buf;
	buf = bootmem_allocate_buffer(0xF000000);
	assert_non_null(buf);
	assert_int_equal(1, bootmem_region_targets_type((uintptr_t)buf,
							0xF000000, BM_MEM_PAYLOAD));
	assert_in_range((uintptr_t)buf, CACHEABLE_START + RAMSTAGE_SIZE, RESERVED_START);
	/* Check that the newly allocated buffer does not overlap the previous allocation */
	assert_not_in_range((uintptr_t)buf, (uintptr_t)prev, (uintptr_t)prev + 0xE0000000);
	/* Check that allocated (payload) ranges have their base and size aligned */
	bootmem_walk(verify_bootmem_allocate_buffer, NULL);

	/* Run out of memory for new allocations */
	buf = bootmem_allocate_buffer(0x1000000);
	assert_null(buf);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_bootmem_write_mem_table),
		cmocka_unit_test(test_bootmem_add_range),
		cmocka_unit_test(test_bootmem_walk),
		cmocka_unit_test(test_bootmem_allocate_buffer),
		cmocka_unit_test(test_bootmem_region_targets_type)
	};

	return cb_run_group_tests(tests, NULL, NULL);
}