xref: /aosp_15_r20/external/coreboot/src/lib/cbfs.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c)
/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <boot_device.h>
#include <cbfs.h>
#include <cbmem.h>
#include <commonlib/bsd/cbfs_private.h>
#include <commonlib/bsd/compression.h>
#include <commonlib/list.h>
#include <console/console.h>
#include <fmap.h>
#include <lib.h>
#include <metadata_hash.h>
#include <security/tpm/tspi/crtm.h>
#include <security/vboot/vboot_common.h>
#include <security/vboot/misc.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <thread.h>
#include <timestamp.h>

#if ENV_X86 && (ENV_POSTCAR || ENV_SMM)
struct mem_pool cbfs_cache = MEM_POOL_INIT(NULL, 0, 0);
#elif CONFIG(POSTRAM_CBFS_CACHE_IN_BSS) && ENV_RAMSTAGE
static u8 cache_buffer[CONFIG_RAMSTAGE_CBFS_CACHE_SIZE];
struct mem_pool cbfs_cache =
	MEM_POOL_INIT(cache_buffer, sizeof(cache_buffer), CONFIG_CBFS_CACHE_ALIGN);
#else
struct mem_pool cbfs_cache =
	MEM_POOL_INIT(_cbfs_cache, REGION_SIZE(cbfs_cache), CONFIG_CBFS_CACHE_ALIGN);
#endif

static void switch_to_postram_cache(int unused)
{
	if (_preram_cbfs_cache != _postram_cbfs_cache)
		mem_pool_init(&cbfs_cache, _postram_cbfs_cache, REGION_SIZE(postram_cbfs_cache),
			      CONFIG_CBFS_CACHE_ALIGN);
}
CBMEM_CREATION_HOOK(switch_to_postram_cache);

enum cb_err _cbfs_boot_lookup(const char *name, bool force_ro,
			      union cbfs_mdata *mdata, struct region_device *rdev)
{
	const struct cbfs_boot_device *cbd = cbfs_get_boot_device(force_ro);
	if (!cbd)
		return CB_ERR;

	size_t data_offset;
	enum cb_err err = CB_CBFS_CACHE_FULL;
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size)
		err = cbfs_mcache_lookup(cbd->mcache, cbd->mcache_size,
					 name, mdata, &data_offset);
	if (err == CB_CBFS_CACHE_FULL) {
		struct vb2_hash *metadata_hash = NULL;
		if (CONFIG(TOCTOU_SAFETY)) {
			if (ENV_SMM)  /* Cannot provide TOCTOU safety for SMM */
				dead_code();
			if (!cbd->mcache_size)
				die("Cannot access CBFS TOCTOU-safely in " ENV_STRING " before CBMEM init!\n");
			/* We can only reach this for the RW CBFS -- an mcache overflow in the
			   RO CBFS would have been caught when building the mcache in
			   cbfs_get_boot_device(). (Note that TOCTOU_SAFETY implies
			   !NO_CBFS_MCACHE.) */
			assert(cbd == vboot_get_cbfs_boot_device());
			if (!CONFIG(VBOOT)
			    || vb2api_get_metadata_hash(vboot_get_context(), &metadata_hash)
				       != VB2_SUCCESS)
				die("Failed to get RW metadata hash");
		}
		err = cbfs_lookup(&cbd->rdev, name, mdata, &data_offset, metadata_hash);
	}

	if (CONFIG(VBOOT_ENABLE_CBFS_FALLBACK) && !force_ro && err == CB_CBFS_NOT_FOUND) {
		printk(BIOS_INFO, "CBFS: Fall back to RO region for %s\n", name);
		return _cbfs_boot_lookup(name, true, mdata, rdev);
	}
	if (err) {
		if (err == CB_CBFS_NOT_FOUND)
			printk(BIOS_WARNING, "CBFS: '%s' not found.\n", name);
		else if (err == CB_CBFS_HASH_MISMATCH)
			printk(BIOS_ERR, "CBFS ERROR: metadata hash mismatch!\n");
		else
			printk(BIOS_ERR, "CBFS ERROR: error %d when looking up '%s'\n",
			       err, name);
		return err;
	}

	if (rdev_chain(rdev, &cbd->rdev, data_offset, be32toh(mdata->h.len)))
		return CB_ERR;

	return CB_SUCCESS;
}

void cbfs_unmap(void *mapping)
{
	/*
	 * This is safe to call with mappings that weren't allocated in the cache (e.g. x86
	 * direct mappings) -- mem_pool_free() just does nothing for addresses it doesn't
	 * recognize. This hardcodes the assumption that if platforms implement an rdev_mmap()
	 * that requires a free() for the boot_device, they need to implement it via the
	 * cbfs_cache mem_pool.
	 */
	mem_pool_free(&cbfs_cache, mapping);
}
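
/*
 * Illustrative sketch (not part of the original file): the typical
 * cbfs_map()/cbfs_unmap() pairing this function exists for. The file name
 * is hypothetical.
 *
 *	size_t size;
 *	void *data = cbfs_map("logo.bmp", &size);
 *	if (data) {
 *		...consume data[0..size)...
 *		cbfs_unmap(data);
 *	}
 */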

static inline bool fsps_env(void)
{
	/* FSP-S is assumed to be loaded in ramstage. */
	if (ENV_RAMSTAGE)
		return true;
	return false;
}

static inline bool fspm_env(void)
{
	/* FSP-M is assumed to be loaded in romstage. */
	if (ENV_RAMINIT)
		return true;
	return false;
}

static inline bool cbfs_lz4_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZ4))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZ4))
		return true;

	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) && !CONFIG(COMPRESS_PRERAM_STAGES))
		return false;

	if (ENV_SMM)
		return false;

	return true;
}

static inline bool cbfs_lzma_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZMA))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZMA))
		return true;

	/* Payload loader (ramstage) always needs LZMA. */
	if (ENV_PAYLOAD_LOADER)
		return true;
	/* Only other use of LZMA is ramstage compression. */
	if (!CONFIG(COMPRESS_RAMSTAGE_LZMA))
		return false;
	/* If there is a postcar, it loads the ramstage. */
	if (CONFIG(POSTCAR_STAGE))
		return ENV_POSTCAR;
	/* If there is no postcar but a separate romstage, it loads the ramstage. */
	if (CONFIG(SEPARATE_ROMSTAGE))
		return ENV_SEPARATE_ROMSTAGE;
	/* Otherwise, the combined bootblock+romstage loads the ramstage. */
	return ENV_BOOTBLOCK;
}

static bool cbfs_file_hash_mismatch(const void *buffer, size_t size,
				    const union cbfs_mdata *mdata, bool skip_verification)
{
	/* Avoid linking hash functions when verification and measurement are disabled. */
	if (!CONFIG(CBFS_VERIFICATION) && !CONFIG(TPM_MEASURED_BOOT))
		return false;

	const struct vb2_hash *hash = NULL;

	if (CONFIG(CBFS_VERIFICATION) && !skip_verification) {
		hash = cbfs_file_hash(mdata);
		if (!hash) {
			ERROR("'%s' does not have a file hash!\n", mdata->h.filename);
			return true;
		}

		vb2_error_t rv = vb2_hash_verify(vboot_hwcrypto_allowed(), buffer, size, hash);
		if (rv != VB2_SUCCESS) {
			ERROR("'%s' file hash mismatch!\n", mdata->h.filename);
			if (CONFIG(VBOOT_CBFS_INTEGRATION) && !vboot_recovery_mode_enabled()
			    && vboot_logic_executed())
				vboot_fail_and_reboot(vboot_get_context(), VB2_RECOVERY_FW_BODY,
						      rv);
			return true;
		}
	}

	if (CONFIG(TPM_MEASURED_BOOT) && !ENV_SMM) {
		struct vb2_hash calculated_hash;

		/* No need to re-hash file if we already have it from verification. */
		if (!hash || hash->algo != TPM_MEASURE_ALGO) {
			if (vb2_hash_calculate(vboot_hwcrypto_allowed(), buffer, size,
					       TPM_MEASURE_ALGO, &calculated_hash))
				hash = NULL;
			else
				hash = &calculated_hash;
		}

		if (!hash ||
		    tspi_cbfs_measurement(mdata->h.filename, be32toh(mdata->h.type), hash))
			ERROR("failed to measure '%s' into TPM log\n", mdata->h.filename);
			/* We intentionally continue to boot on measurement errors. */
	}

	return false;
}

static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *buffer,
				       size_t buffer_size, uint32_t compression,
				       const union cbfs_mdata *mdata, bool skip_verification)
{
	size_t in_size = region_device_sz(rdev);
	size_t out_size = 0;
	void *map;

	DEBUG("Decompressing %zu bytes from '%s' to %p with algo %d\n",
	      in_size, mdata->h.filename, buffer, compression);

	if (CONFIG(CBFS_VERIFICATION) && !CONFIG(CBFS_ALLOW_UNVERIFIED_DECOMPRESSION) &&
	    skip_verification && compression != CBFS_COMPRESS_NONE) {
		ERROR("Refusing to decompress unverified file '%s' with algo %d\n",
		      mdata->h.filename, compression);
		return 0;
	}

	switch (compression) {
	case CBFS_COMPRESS_NONE:
		if (buffer_size < in_size)
			return 0;
		if (rdev_readat(rdev, buffer, 0, in_size) != in_size)
			return 0;
		if (cbfs_file_hash_mismatch(buffer, in_size, mdata, skip_verification))
			return 0;
		return in_size;

	case CBFS_COMPRESS_LZ4:
		if (!cbfs_lz4_enabled())
			return 0;

		/* cbfs_prog_stage_load() takes care of in-place LZ4 decompression by
		   setting up the rdev to be in memory. */
		map = rdev_mmap_full(rdev);
		if (map == NULL)
			return 0;

		if (!cbfs_file_hash_mismatch(map, in_size, mdata, skip_verification)) {
			timestamp_add_now(TS_ULZ4F_START);
			out_size = ulz4fn(map, in_size, buffer, buffer_size);
			timestamp_add_now(TS_ULZ4F_END);
		}

		rdev_munmap(rdev, map);

		return out_size;

	case CBFS_COMPRESS_LZMA:
		if (!cbfs_lzma_enabled())
			return 0;
		map = rdev_mmap_full(rdev);
		if (map == NULL)
			return 0;

		if (!cbfs_file_hash_mismatch(map, in_size, mdata, skip_verification)) {
			/* Note: timestamp not useful for memory-mapped media (x86) */
			timestamp_add_now(TS_ULZMA_START);
			out_size = ulzman(map, in_size, buffer, buffer_size);
			timestamp_add_now(TS_ULZMA_END);
		}

		rdev_munmap(rdev, map);

		return out_size;

	default:
		return 0;
	}
}

struct cbfs_preload_context {
	struct region_device rdev;
	struct thread_handle handle;
	struct list_node list_node;
	void *buffer;
	char name[];
};

static struct list_node cbfs_preload_context_list;

static struct cbfs_preload_context *alloc_cbfs_preload_context(size_t additional)
{
	struct cbfs_preload_context *context;
	size_t size = sizeof(*context) + additional;

	context = mem_pool_alloc(&cbfs_cache, size);

	if (!context)
		return NULL;

	memset(context, 0, size);

	return context;
}

static void append_cbfs_preload_context(struct cbfs_preload_context *context)
{
	list_append(&context->list_node, &cbfs_preload_context_list);
}

static void free_cbfs_preload_context(struct cbfs_preload_context *context)
{
	list_remove(&context->list_node);

	mem_pool_free(&cbfs_cache, context);
}

static enum cb_err cbfs_preload_thread_entry(void *arg)
{
	struct cbfs_preload_context *context = arg;

	if (rdev_read_full(&context->rdev, context->buffer) < 0) {
		ERROR("%s(name='%s') readat failed\n", __func__, context->name);
		return CB_ERR;
	}

	return CB_SUCCESS;
}

void cbfs_preload(const char *name)
{
	struct region_device rdev;
	union cbfs_mdata mdata;
	struct cbfs_preload_context *context;
	bool force_ro = false;
	size_t size;

	if (!CONFIG(CBFS_PRELOAD))
		dead_code();

	/* We don't want to cross the vboot boundary */
	if (ENV_SEPARATE_ROMSTAGE && CONFIG(VBOOT_STARTS_IN_ROMSTAGE))
		return;

	DEBUG("%s(name='%s')\n", __func__, name);

	if (_cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return;

	size = region_device_sz(&rdev);

	context = alloc_cbfs_preload_context(strlen(name) + 1);
	if (!context) {
		ERROR("%s(name='%s') failed to allocate preload context\n", __func__, name);
		return;
	}

	context->buffer = mem_pool_alloc(&cbfs_cache, size);
	if (context->buffer == NULL) {
		ERROR("%s(name='%s') failed to allocate %zu bytes for preload buffer\n",
		      __func__, name, size);
		goto out;
	}

	context->rdev = rdev;
	strcpy(context->name, name);

	append_cbfs_preload_context(context);

	if (thread_run(&context->handle, cbfs_preload_thread_entry, context) == 0)
		return;

	ERROR("%s(name='%s') failed to start preload thread\n", __func__, name);
	mem_pool_free(&cbfs_cache, context->buffer);

out:
	free_cbfs_preload_context(context);
}
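
/*
 * Illustrative sketch (not part of the original file): a stage can kick off
 * a preload early and later consume the file through the regular API, which
 * picks up the finished buffer via get_preload_rdev(). File name and
 * destination buffer are hypothetical.
 *
 *	cbfs_preload("fallback/payload");
 *	...other init work overlaps with the background read...
 *	size_t got = cbfs_load("fallback/payload", buf, buf_size);
 */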

static struct cbfs_preload_context *find_cbfs_preload_context(const char *name)
{
	struct cbfs_preload_context *context;

	list_for_each(context, cbfs_preload_context_list, list_node) {
		if (strcmp(context->name, name) == 0)
			return context;
	}

	return NULL;
}

static enum cb_err get_preload_rdev(struct region_device *rdev, const char *name)
{
	enum cb_err err;
	struct cbfs_preload_context *context;

	if (!CONFIG(CBFS_PRELOAD) || !ENV_SUPPORTS_COOP)
		return CB_ERR_ARG;

	context = find_cbfs_preload_context(name);
	if (!context)
		return CB_ERR_ARG;

	err = thread_join(&context->handle);
	if (err != CB_SUCCESS) {
		ERROR("%s(name='%s') Preload thread failed: %d\n", __func__, name, err);

		goto out;
	}

	if (rdev_chain_mem(rdev, context->buffer, region_device_sz(&context->rdev)) != 0) {
		ERROR("%s(name='%s') chaining failed\n", __func__, name);

		err = CB_ERR;
		goto out;
	}

	err = CB_SUCCESS;

	DEBUG("%s(name='%s') preload successful\n", __func__, name);

out:
	free_cbfs_preload_context(context);

	return err;
}

static void *do_alloc(union cbfs_mdata *mdata, struct region_device *rdev,
		      cbfs_allocator_t allocator, void *arg, size_t *size_out,
		      bool skip_verification)
{
	size_t size = region_device_sz(rdev);
	void *loc = NULL;

	uint32_t compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(mdata,
				CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
	if (cattr) {
		compression = be32toh(cattr->compression);
		size = be32toh(cattr->decompressed_size);
	}

	if (size_out)
		*size_out = size;

	/* allocator == NULL means do a cbfs_map() */
	if (allocator) {
		loc = allocator(arg, size, mdata);
	} else if (compression == CBFS_COMPRESS_NONE) {
		void *mapping = rdev_mmap_full(rdev);
		if (!mapping)
			return NULL;
		if (cbfs_file_hash_mismatch(mapping, size, mdata, skip_verification)) {
			rdev_munmap(rdev, mapping);
			return NULL;
		}
		return mapping;
	} else if (!cbfs_cache.size) {
		/* In order to use the cbfs_cache you need to add a CBFS_CACHE to your
		 * memlayout. */
		ERROR("Cannot map compressed file %s without cbfs_cache\n", mdata->h.filename);
		return NULL;
	} else {
		loc = mem_pool_alloc(&cbfs_cache, size);
	}

	if (!loc) {
		ERROR("'%s' allocation failure\n", mdata->h.filename);
		return NULL;
	}

	size = cbfs_load_and_decompress(rdev, loc, size, compression, mdata, skip_verification);
	if (!size)
		return NULL;

	return loc;
}
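
/*
 * Illustrative note (not part of the original file): the cbfs_cache that
 * backs the compressed-file path above comes from a CBFS_CACHE entry in the
 * platform's memlayout, along the lines of (address and size hypothetical):
 *
 *	CBFS_CACHE(0x10000, 8K)
 */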

void *_cbfs_alloc(const char *name, cbfs_allocator_t allocator, void *arg,
		  size_t *size_out, bool force_ro, enum cbfs_type *type)
{
	struct region_device rdev;
	bool preload_successful = false;
	union cbfs_mdata mdata;

	DEBUG("%s(name='%s', alloc=%p(%p), force_ro=%s, type=%d)\n", __func__, name, allocator,
	      arg, force_ro ? "true" : "false", type ? *type : -1);

	if (_cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return NULL;

	if (type) {
		const enum cbfs_type real_type = be32toh(mdata.h.type);
		if (*type == CBFS_TYPE_QUERY)
			*type = real_type;
		else if (*type != real_type) {
			ERROR("'%s' type mismatch (is %u, expected %u)\n",
			      mdata.h.filename, real_type, *type);
			return NULL;
		}
	}

	/* Update the rdev with the preload content */
	if (!force_ro && get_preload_rdev(&rdev, name) == CB_SUCCESS)
		preload_successful = true;

	void *ret = do_alloc(&mdata, &rdev, allocator, arg, size_out, false);

	/* When using cbfs_preload we need to free the preload buffer after populating the
	 * destination buffer. We know we must have a mem_rdev here, so extra mmap is fine. */
	if (preload_successful)
		cbfs_unmap(rdev_mmap_full(&rdev));

	return ret;
}

void *_cbfs_unverified_area_alloc(const char *area, const char *name,
				  cbfs_allocator_t allocator, void *arg, size_t *size_out)
{
	struct region_device area_rdev, file_rdev;
	union cbfs_mdata mdata;
	size_t data_offset;

	DEBUG("%s(area='%s', name='%s', alloc=%p(%p))\n", __func__, area, name, allocator, arg);

	if (fmap_locate_area_as_rdev(area, &area_rdev))
		return NULL;

	if (cbfs_lookup(&area_rdev, name, &mdata, &data_offset, NULL)) {
		ERROR("'%s' not found in '%s'\n", name, area);
		return NULL;
	}

	if (rdev_chain(&file_rdev, &area_rdev, data_offset, be32toh(mdata.h.len)))
		return NULL;

	return do_alloc(&mdata, &file_rdev, allocator, arg, size_out, true);
}

void *_cbfs_default_allocator(void *arg, size_t size, const union cbfs_mdata *unused)
{
	struct _cbfs_default_allocator_arg *darg = arg;
	if (size > darg->buf_size)
		return NULL;
	return darg->buf;
}
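
/*
 * Illustrative sketch (not part of the original file): this allocator backs
 * cbfs_load(), which fills a caller-provided buffer. Buffer and file name
 * are hypothetical.
 *
 *	u8 buf[2048];
 *	size_t size = cbfs_load("ecrw.hash", buf, sizeof(buf));
 *	if (!size)
 *		...file missing, too large, or failed verification...
 */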

void *_cbfs_cbmem_allocator(void *arg, size_t size, const union cbfs_mdata *unused)
{
	return cbmem_add((uintptr_t)arg, size);
}
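
/*
 * Illustrative sketch (not part of the original file): this allocator backs
 * cbfs_cbmem_alloc(), which decompresses a file straight into a freshly
 * added CBMEM entry. The file name and CBMEM ID are hypothetical.
 *
 *	size_t size;
 *	void *vbt = cbfs_cbmem_alloc("vbt.bin", CBMEM_ID_EXT_VBT, &size);
 */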

enum cb_err cbfs_prog_stage_load(struct prog *pstage)
{
	union cbfs_mdata mdata;
	struct region_device rdev;
	enum cb_err err;

	prog_locate_hook(pstage);

	if ((err = _cbfs_boot_lookup(prog_name(pstage), false, &mdata, &rdev)))
		return err;

	assert(be32toh(mdata.h.type) == CBFS_TYPE_STAGE);
	pstage->cbfs_type = CBFS_TYPE_STAGE;

	enum cbfs_compression compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(&mdata,
				CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
	if (cattr)
		compression = be32toh(cattr->compression);

	const struct cbfs_file_attr_stageheader *sattr = cbfs_find_attr(&mdata,
				CBFS_FILE_ATTR_TAG_STAGEHEADER, sizeof(*sattr));
	if (!sattr)
		return CB_ERR;
	prog_set_area(pstage, (void *)(uintptr_t)be64toh(sattr->loadaddr),
		      be32toh(sattr->memlen));
	prog_set_entry(pstage, prog_start(pstage) +
			       be32toh(sattr->entry_offset), NULL);

	/* Hacky way to not load programs over read only media. The stages
	 * that would hit this path initialize themselves. */
	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) &&
	    !CONFIG(NO_XIP_EARLY_STAGES) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED)) {
		void *mapping = rdev_mmap_full(&rdev);
		rdev_munmap(&rdev, mapping);
		if (cbfs_file_hash_mismatch(mapping, region_device_sz(&rdev), &mdata, false))
			return CB_CBFS_HASH_MISMATCH;
		if (mapping == prog_start(pstage))
			return CB_SUCCESS;
	}

	/* LZ4 stages can be decompressed in-place to save mapping scratch space. Load the
	   compressed data to the end of the buffer and point &rdev to that memory location. */
	if (cbfs_lz4_enabled() && compression == CBFS_COMPRESS_LZ4) {
		size_t in_size = region_device_sz(&rdev);
		void *compr_start = prog_start(pstage) + prog_size(pstage) - in_size;
		if (rdev_readat(&rdev, compr_start, 0, in_size) != in_size)
			return CB_ERR;
		rdev_chain_mem(&rdev, compr_start, in_size);
	}

	size_t fsize = cbfs_load_and_decompress(&rdev, prog_start(pstage), prog_size(pstage),
						compression, &mdata, false);
	if (!fsize)
		return CB_ERR;

	/* Clear area not covered by file. */
	memset(prog_start(pstage) + fsize, 0, prog_size(pstage) - fsize);

	prog_segment_loaded((uintptr_t)prog_start(pstage), prog_size(pstage),
			    SEG_FINAL);

	return CB_SUCCESS;
}
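
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * load and run a stage through this function, roughly following the prog
 * API used elsewhere in coreboot.
 *
 *	struct prog stage = PROG_INIT(PROG_RAMSTAGE, CONFIG_CBFS_PREFIX "/ramstage");
 *	if (cbfs_prog_stage_load(&stage) == CB_SUCCESS)
 *		prog_run(&stage);
 */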

void cbfs_boot_device_find_mcache(struct cbfs_boot_device *cbd, uint32_t id)
{
	if (CONFIG(NO_CBFS_MCACHE) || ENV_SMM)
		return;

	if (cbd->mcache_size)
		return;

	const struct cbmem_entry *entry;
	if (ENV_HAS_CBMEM &&
	    (entry = cbmem_entry_find(id))) {
		cbd->mcache = cbmem_entry_start(entry);
		cbd->mcache_size = cbmem_entry_size(entry);
	} else if (ENV_ROMSTAGE_OR_BEFORE) {
		u8 *boundary = _ecbfs_mcache - REGION_SIZE(cbfs_mcache) *
			CONFIG_CBFS_MCACHE_RW_PERCENTAGE / 100;
		boundary = (u8 *)ALIGN_DOWN((uintptr_t)boundary, CBFS_MCACHE_ALIGNMENT);
		if (id == CBMEM_ID_CBFS_RO_MCACHE) {
			cbd->mcache = _cbfs_mcache;
			cbd->mcache_size = boundary - _cbfs_mcache;
		} else if (id == CBMEM_ID_CBFS_RW_MCACHE) {
			cbd->mcache = boundary;
			cbd->mcache_size = _ecbfs_mcache - boundary;
		}
	}
}

enum cb_err cbfs_init_boot_device(const struct cbfs_boot_device *cbd,
				  struct vb2_hash *mdata_hash)
{
	/* If we have an mcache, mcache_build() will also check the mdata hash. */
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size > 0)
		return cbfs_mcache_build(&cbd->rdev, cbd->mcache, cbd->mcache_size, mdata_hash);

	/* No mcache and no verification means we have nothing special to do. */
	if (!CONFIG(CBFS_VERIFICATION) || !mdata_hash)
		return CB_SUCCESS;

	/* Verification only: use cbfs_walk() without a walker() function to just run through
	   the CBFS once; it returns CB_CBFS_NOT_FOUND by default. */
	enum cb_err err = cbfs_walk(&cbd->rdev, NULL, NULL, mdata_hash, 0);
	if (err == CB_CBFS_NOT_FOUND)
		err = CB_SUCCESS;
	return err;
}

const struct cbfs_boot_device *cbfs_get_boot_device(bool force_ro)
{
	static struct cbfs_boot_device ro;

	/* Ensure we always init the RO mcache, even if the first file is from the RW CBFS.
	   Otherwise it may not be available when needed in later stages. */
	if (ENV_INITIAL_STAGE && !force_ro && !region_device_sz(&ro.rdev))
		cbfs_get_boot_device(true);

	if (!force_ro) {
		const struct cbfs_boot_device *rw = vboot_get_cbfs_boot_device();
		/* This will return NULL if vboot isn't enabled, hasn't run yet, or decided to
		   boot into recovery mode. */
		if (rw)
			return rw;
	}

	/* In rare cases post-RAM stages may run this before cbmem_initialize(), so we can't
	   lock in the result of find_mcache() on the first try and should keep trying every
	   time until an mcache is found. */
	cbfs_boot_device_find_mcache(&ro, CBMEM_ID_CBFS_RO_MCACHE);

	if (region_device_sz(&ro.rdev))
		return &ro;

	if (fmap_locate_area_as_rdev("COREBOOT", &ro.rdev))
		die("Cannot locate primary CBFS");

	if (ENV_INITIAL_STAGE) {
		enum cb_err err = cbfs_init_boot_device(&ro, metadata_hash_get());
		if (err == CB_CBFS_HASH_MISMATCH)
			die("RO CBFS metadata hash verification failure");
		else if (CONFIG(TOCTOU_SAFETY) && err == CB_CBFS_CACHE_FULL)
			die("RO mcache overflow breaks TOCTOU safety!\n");
		else if (err && err != CB_CBFS_CACHE_FULL)
			die("RO CBFS initialization error: %d", err);
	}

	return &ro;
}

#if !CONFIG(NO_CBFS_MCACHE)
static void mcache_to_cbmem(const struct cbfs_boot_device *cbd, u32 cbmem_id)
{
	if (!cbd)
		return;

	size_t real_size = cbfs_mcache_real_size(cbd->mcache, cbd->mcache_size);
	void *cbmem_mcache = cbmem_add(cbmem_id, real_size);
	if (!cbmem_mcache) {
		printk(BIOS_ERR, "Cannot allocate CBMEM mcache %#x (%#zx bytes)!\n",
		       cbmem_id, real_size);
		return;
	}
	memcpy(cbmem_mcache, cbd->mcache, real_size);
}

static void cbfs_mcache_migrate(int unused)
{
	mcache_to_cbmem(vboot_get_cbfs_boot_device(), CBMEM_ID_CBFS_RW_MCACHE);
	mcache_to_cbmem(cbfs_get_boot_device(true), CBMEM_ID_CBFS_RO_MCACHE);
}
CBMEM_CREATION_HOOK(cbfs_mcache_migrate);
#endif