xref: /aosp_15_r20/external/coreboot/src/drivers/mrc_cache/mrc_cache.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 #include <string.h>
4 #include <boot_device.h>
5 #include <bootstate.h>
6 #include <bootmode.h>
7 #include <console/console.h>
8 #include <cbmem.h>
9 #include <elog.h>
10 #include <fmap.h>
11 #include <region_file.h>
12 #include <security/vboot/antirollback.h>
13 #include <security/vboot/mrc_cache_hash_tpm.h>
14 #include <security/vboot/vboot_common.h>
15 #include <spi_flash.h>
16 #include <xxhash.h>
17 
18 #include "mrc_cache.h"
19 
20 #define DEFAULT_MRC_CACHE	"RW_MRC_CACHE"
21 #define VARIABLE_MRC_CACHE	"RW_VAR_MRC_CACHE"
22 #define RECOVERY_MRC_CACHE	"RECOVERY_MRC_CACHE"
23 #define UNIFIED_MRC_CACHE	"UNIFIED_MRC_CACHE"
24 
25 /* Signature "MRCD" was used for older header format before CB:67670. */
26 #define MRC_DATA_SIGNATURE       (('M'<<0)|('R'<<8)|('C'<<16)|('d'<<24))
27 
28 static const uint32_t mrc_invalid_sig = ~MRC_DATA_SIGNATURE;
29 
/* Header stored in front of each saved MRC data blob, both on flash and in
 * the cbmem staging copy. All hashes are 32-bit xxhash values (see
 * mrc_header_valid() / mrc_data_valid()). */
struct mrc_metadata {
	uint32_t signature;	/* MRC_DATA_SIGNATURE, or ~signature when invalidated */
	uint32_t data_size;	/* size in bytes of the data following this header */
	uint32_t data_hash;	/* xxh32 over the data payload */
	uint32_t header_hash;	/* xxh32 over this header with this field set to 0 */
	uint32_t version;	/* caller-supplied version checked on reload */
} __packed;
37 
/* Outcome of a cache update attempt, used for event-log reporting. */
enum result {
	UPDATE_FAILURE		= -1,
	UPDATE_SUCCESS		= 0,
	ALREADY_UPTODATE	= 1
};
43 
44 #define NORMAL_FLAG (1 << 0)
45 #define RECOVERY_FLAG (1 << 1)
46 
/* Static description of one FMAP-backed mrc_cache region. */
struct cache_region {
	const char *name;	/* FMAP area name */
	uint32_t cbmem_id;	/* cbmem entry id used when staging via cbmem */
	int type;		/* MRC_TRAINING_DATA or MRC_VARIABLE_DATA */
	int elog_slot;		/* slot value recorded in elog update events */
	uint32_t tpm_hash_index; /* TPM NV index for the data hash; 0 = unused */
	int flags;		/* NORMAL_FLAG / RECOVERY_FLAG usability mask */
};
55 
/* Training data cache used in recovery mode; only usable when the board
 * provides a dedicated RECOVERY_MRC_CACHE FMAP region. */
static const struct cache_region recovery_training = {
	.name = RECOVERY_MRC_CACHE,
	.cbmem_id = CBMEM_ID_MRCDATA,
	.type = MRC_TRAINING_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_RECOVERY,
	.tpm_hash_index = MRC_REC_HASH_NV_INDEX,
#if CONFIG(HAS_RECOVERY_MRC_CACHE)
	.flags = RECOVERY_FLAG,
#else
	/* Without a dedicated region this descriptor can never match. */
	.flags = 0,
#endif
};
68 
/* Training data cache for normal boots (RW_MRC_CACHE). */
static const struct cache_region normal_training = {
	.name = DEFAULT_MRC_CACHE,
	.cbmem_id = CBMEM_ID_MRCDATA,
	.type = MRC_TRAINING_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_NORMAL,
	.tpm_hash_index = MRC_RW_HASH_NV_INDEX,
#if CONFIG(VBOOT_STARTS_IN_ROMSTAGE)
	/*
	 * If VBOOT_STARTS_IN_ROMSTAGE is selected, this means that
	 * memory training happens before vboot (in RO) and the
	 * mrc_cache data is always safe to use.
	 */
	.flags = NORMAL_FLAG | RECOVERY_FLAG,
#else
	/*
	 * If !VBOOT_STARTS_IN_ROMSTAGE, this means that memory training happens after
	 * vboot (in RW code) and is never safe to use in recovery.
	 */
	.flags = NORMAL_FLAG,
#endif
};
90 
/* Variable (non-training) MRC data cache; no TPM hash protection. */
static const struct cache_region variable_data = {
	.name = VARIABLE_MRC_CACHE,
	.cbmem_id = CBMEM_ID_VAR_MRCDATA,
	.type = MRC_VARIABLE_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_VARIABLE,
	.tpm_hash_index = 0,
#if CONFIG(VBOOT_STARTS_IN_ROMSTAGE)
	/*
	 * If VBOOT_STARTS_IN_ROMSTAGE is selected, this means that
	 * memory training happens before vboot (in RO) and the
	 * mrc_cache data is always safe to use.
	 */
	.flags = NORMAL_FLAG | RECOVERY_FLAG,
#else
	/*
	 * If !VBOOT_STARTS_IN_ROMSTAGE, this means that memory training happens after
	 * vboot (in RW code) and is never safe to use in recovery.
	 */
	.flags = NORMAL_FLAG,
#endif
};
112 
/* Order matters here for priority in matching: the first entry whose type
 * and boot-mode flags match wins (see lookup_region_type()). */
static const struct cache_region *cache_regions[] = {
	&recovery_training,
	&normal_training,
	&variable_data,
};

/* TPM MRC hash functionality depends on vboot starting before memory init. */
_Static_assert(!CONFIG(MRC_SAVE_HASH_IN_TPM) ||
	       CONFIG(VBOOT_STARTS_IN_BOOTBLOCK),
	       "for TPM MRC hash functionality, vboot must start in bootblock");
124 
/* Resolve an FMAP area by name into r. Returns 0 on success, -1 otherwise. */
static int lookup_region_by_name(const char *name, struct region *r)
{
	return (fmap_locate_area(name, r) == 0) ? 0 : -1;
}
131 
lookup_region_type(int type)132 static const struct cache_region *lookup_region_type(int type)
133 {
134 	int i;
135 	int flags;
136 
137 	if (CONFIG(VBOOT_STARTS_IN_BOOTBLOCK) && vboot_recovery_mode_enabled())
138 		flags = RECOVERY_FLAG;
139 	else
140 		flags = NORMAL_FLAG;
141 
142 	for (i = 0; i < ARRAY_SIZE(cache_regions); i++) {
143 		if (cache_regions[i]->type != type)
144 			continue;
145 		if ((cache_regions[i]->flags & flags) == flags)
146 			return cache_regions[i];
147 	}
148 
149 	return NULL;
150 }
151 
lookup_region(struct region * r,int type)152 static const struct cache_region *lookup_region(struct region *r, int type)
153 {
154 	const struct cache_region *cr;
155 
156 	cr = lookup_region_type(type);
157 
158 	if (cr == NULL) {
159 		/* There will be no recovery MRC cache region if (!HAS_RECOVERY_MRC_CACHE &&
160 		   !VBOOT_STARTS_IN_ROMSTAGE). */
161 		printk(BIOS_DEBUG, "MRC: failed to locate region type %d\n", type);
162 		return NULL;
163 	}
164 
165 	if (lookup_region_by_name(cr->name, r) < 0)
166 		return NULL;
167 
168 	return cr;
169 }
170 
/*
 * Read and validate the mrc_metadata header at the start of rdev.
 * On success, returns 0 with rdev re-chained to cover exactly
 * header + data. Returns -1 on read failure, intentional invalidation,
 * bad signature, header-hash mismatch, or when header + data would
 * exceed the region size.
 */
static int mrc_header_valid(struct region_device *rdev, struct mrc_metadata *md)
{
	uint32_t hash;
	uint32_t hash_result;
	size_t size;

	if (rdev_readat(rdev, md, 0, sizeof(*md)) < 0) {
		/* When the metadata was invalidated intentionally (for example from the
		   previous recovery boot), print a warning instead of an error.
		   The invalidation record is only 4 bytes, so re-read just the
		   signature field. */
		if (rdev_readat(rdev, md, 0, sizeof(mrc_invalid_sig)) >= 0 &&
		    md->signature == mrc_invalid_sig) {
			printk(BIOS_INFO, "MRC: metadata was invalidated\n");
			return -1;
		}

		printk(BIOS_ERR, "MRC: couldn't read metadata\n");
		return -1;
	}

	if (md->signature != MRC_DATA_SIGNATURE) {
		printk(BIOS_ERR, "MRC: invalid header signature\n");
		return -1;
	}

	/* Compute hash over header with 0 as the value. */
	hash = md->header_hash;
	md->header_hash = 0;
	hash_result = xxh32(md, sizeof(*md), 0);

	if (hash != hash_result) {
		printk(BIOS_ERR, "MRC: header hash mismatch: %x vs %x\n",
			hash, hash_result);
		return -1;
	}

	/* Put back original. */
	md->header_hash = hash;

	/* Re-size the region device according to the metadata as a region_file
	 * does block allocation. */
	size = sizeof(*md) + md->data_size;
	if (rdev_chain(rdev, rdev, 0, size) < 0) {
		printk(BIOS_ERR, "MRC: size exceeds rdev size: %zx vs %zx\n",
			size, region_device_sz(rdev));
		return -1;
	}

	return 0;
}
220 
mrc_data_valid(int type,const struct mrc_metadata * md,void * data,size_t data_size)221 static int mrc_data_valid(int type, const struct mrc_metadata *md,
222 			  void *data, size_t data_size)
223 {
224 	uint32_t hash;
225 	const struct cache_region *cr = lookup_region_type(type);
226 	uint32_t hash_idx;
227 
228 	if (cr == NULL)
229 		return -1;
230 
231 	if (md->data_size != data_size)
232 		return -1;
233 
234 	hash_idx = cr->tpm_hash_index;
235 	if (hash_idx && CONFIG(MRC_SAVE_HASH_IN_TPM)) {
236 		if (!mrc_cache_verify_hash(hash_idx, data, data_size))
237 			return -1;
238 	} else {
239 		hash = xxh32(data, data_size, 0);
240 
241 		if (md->data_hash != hash) {
242 			printk(BIOS_ERR, "MRC: data hash mismatch: %x vs %x\n",
243 			       md->data_hash, hash);
244 			return -1;
245 		}
246 	}
247 
248 	return 0;
249 }
250 
/*
 * Initialize the region_file over backing_rdev and chain rdev to the most
 * recent slot, validating its metadata header into md.
 *
 * When fail_bad_data is true, missing or invalid data is an error (-1).
 * When false, it returns 0 with rdev left 0-sized so a caller performing
 * an update can still use cache_file to overwrite the slot.
 */
static int mrc_cache_get_latest_slot_info(const char *name,
				const struct region_device *backing_rdev,
				struct mrc_metadata *md,
				struct region_file *cache_file,
				struct region_device *rdev,
				bool fail_bad_data)
{
	/* Init and obtain a handle to the file data. */
	if (region_file_init(cache_file, backing_rdev) < 0) {
		printk(BIOS_ERR, "MRC: region file invalid in '%s'\n", name);
		return -1;
	}

	/* Provide a 0 sized region_device from here on out so the caller
	 * has a valid yet unusable region_device. */
	rdev_chain(rdev, backing_rdev, 0, 0);

	/* No data to return. */
	if (region_file_data(cache_file, rdev) < 0) {
		printk(BIOS_NOTICE, "MRC: no data in '%s'\n", name);
		return fail_bad_data ? -1 : 0;
	}

	/* Validate header and resize region to reflect actual usage on the
	 * saved medium (including metadata and data). */
	if (mrc_header_valid(rdev, md) < 0)
		return fail_bad_data ? -1 : 0;

	return 0;
}
281 
/*
 * Locate the most recent valid cache slot of the given type and check its
 * version. On success returns 0 with rdev chained to just the data portion
 * (metadata stripped off) and md filled in. Returns -1 when retraining is
 * forced, the region is missing, the slot is invalid, or the version
 * doesn't match.
 */
static int mrc_cache_find_current(int type, uint32_t version,
				  struct region_device *rdev,
				  struct mrc_metadata *md)
{
	const struct cache_region *cr;
	struct region region;
	struct region_device read_rdev;
	struct region_file cache_file;
	size_t data_size;
	const size_t md_size = sizeof(*md);
	const bool fail_bad_data = true;

	/*
	 * In recovery mode, force retraining if the memory retrain
	 * switch is set.
	 */
	if (CONFIG(VBOOT_STARTS_IN_BOOTBLOCK) && vboot_recovery_mode_enabled()
	    && get_recovery_mode_retrain_switch())
		return -1;

	cr = lookup_region(&region, type);

	if (cr == NULL)
		return -1;

	/* Read-only (typically memory-mapped) view is enough for lookup. */
	if (boot_device_ro_subregion(&region, &read_rdev) < 0)
		return -1;

	if (mrc_cache_get_latest_slot_info(cr->name,
					   &read_rdev,
					   md,
					   &cache_file,
					   rdev,
					   fail_bad_data) < 0)
		return -1;

	if (version != md->version) {
		printk(BIOS_INFO, "MRC: version mismatch: %x vs %x\n",
			md->version, version);
		return -1;
	}

	/* Re-size rdev to only contain the data. i.e. remove metadata. */
	data_size = md->data_size;
	return rdev_chain(rdev, rdev, md_size, data_size);
}
328 
mrc_cache_load_current(int type,uint32_t version,void * buffer,size_t buffer_size)329 ssize_t mrc_cache_load_current(int type, uint32_t version, void *buffer,
330 			      size_t buffer_size)
331 {
332 	struct region_device rdev;
333 	struct mrc_metadata md;
334 	ssize_t data_size;
335 
336 	if (mrc_cache_find_current(type, version, &rdev, &md) < 0)
337 		return -1;
338 
339 	data_size = region_device_sz(&rdev);
340 	if (buffer_size < data_size)
341 		return -1;
342 
343 	if (rdev_readat(&rdev, buffer, 0, data_size) != data_size)
344 		return -1;
345 
346 	if (mrc_data_valid(type, &md, buffer, data_size) < 0)
347 		return -1;
348 
349 	return data_size;
350 }
351 
mrc_cache_current_mmap_leak(int type,uint32_t version,size_t * data_size)352 void *mrc_cache_current_mmap_leak(int type, uint32_t version,
353 				  size_t *data_size)
354 {
355 	struct region_device rdev;
356 	void *data;
357 	size_t region_device_size;
358 	struct mrc_metadata md;
359 
360 	if (mrc_cache_find_current(type, version, &rdev, &md) < 0)
361 		return NULL;
362 
363 	region_device_size = region_device_sz(&rdev);
364 	if (data_size)
365 		*data_size = region_device_size;
366 	data = rdev_mmap_full(&rdev);
367 
368 	if (data == NULL) {
369 		printk(BIOS_INFO, "MRC: mmap failure.\n");
370 		return NULL;
371 	}
372 
373 	if (mrc_data_valid(type, &md, data, region_device_size) < 0)
374 		return NULL;
375 
376 	return data;
377 }
378 
mrc_cache_needs_update(const struct region_device * rdev,const struct mrc_metadata * new_md,size_t new_data_size)379 static bool mrc_cache_needs_update(const struct region_device *rdev,
380 				   const struct mrc_metadata *new_md,
381 				   size_t new_data_size)
382 {
383 	void *mapping;
384 	size_t old_data_size = region_device_sz(rdev) - sizeof(struct mrc_metadata);
385 	bool need_update = false;
386 
387 	if (new_data_size != old_data_size)
388 		return true;
389 
390 	mapping = rdev_mmap_full(rdev);
391 	if (mapping == NULL) {
392 		printk(BIOS_ERR, "MRC: cannot mmap existing cache.\n");
393 		return true;
394 	}
395 
396 	/*
397 	 * Compare the old and new metadata only. If the data hashes don't
398 	 * match, the comparison will fail.
399 	 */
400 	if (memcmp(new_md, mapping, sizeof(struct mrc_metadata)))
401 		need_update = true;
402 
403 	rdev_munmap(rdev, mapping);
404 
405 	return need_update;
406 }
407 
log_event_cache_update(uint8_t slot,enum result res)408 static void log_event_cache_update(uint8_t slot, enum result res)
409 {
410 	const int type = ELOG_TYPE_MEM_CACHE_UPDATE;
411 	struct elog_event_mem_cache_update event = {
412 		.slot = slot
413 	};
414 
415 	/* Filter through interesting events only */
416 	switch (res) {
417 	case UPDATE_FAILURE:
418 		event.status = ELOG_MEM_CACHE_UPDATE_STATUS_FAIL;
419 		break;
420 	case UPDATE_SUCCESS:
421 		event.status = ELOG_MEM_CACHE_UPDATE_STATUS_SUCCESS;
422 		break;
423 	default:
424 		return;
425 	}
426 
427 	if (elog_add_event_raw(type, &event, sizeof(event)) < 0)
428 		printk(BIOS_ERR, "Failed to log mem cache update event.\n");
429 }
430 
/* During ramstage this code purposefully uses incoherent transactions between
 * read and write. The read assumes a memory-mapped boot device that can be used
 * to quickly locate and compare the up-to-date data. However, when an update
 * is required it uses the writable region access to perform the update. */
static void update_mrc_cache_by_type(int type,
				     struct mrc_metadata *new_md,
				     const void *new_data,
				     size_t new_data_size)
{
	const struct cache_region *cr;
	struct region region;
	struct region_device read_rdev;
	struct region_device write_rdev;
	struct region_file cache_file;
	struct mrc_metadata md;
	struct incoherent_rdev backing_irdev;
	const struct region_device *backing_rdev;
	struct region_device latest_rdev;
	const bool fail_bad_data = false;
	uint32_t hash_idx;

	cr = lookup_region(&region, type);

	if (cr == NULL)
		return;

	printk(BIOS_DEBUG, "MRC: Checking cached data update for '%s'.\n",
		cr->name);

	/* Separate RO and RW views of the same region, stitched together
	 * below into one incoherent rdev (reads via RO, writes via RW). */
	if (boot_device_ro_subregion(&region, &read_rdev) < 0)
		return;

	if (boot_device_rw_subregion(&region, &write_rdev) < 0)
		return;

	backing_rdev = incoherent_rdev_init(&backing_irdev, &region, &read_rdev,
						&write_rdev);

	if (backing_rdev == NULL)
		return;

	/* Note that mrc_cache_get_latest_slot_info doesn't check the
	 * validity of the current slot.  If the slot is invalid,
	 * we'll overwrite it anyway when we update the mrc_cache.
	 */
	if (mrc_cache_get_latest_slot_info(cr->name,
					   backing_rdev,
					   &md,
					   &cache_file,
					   &latest_rdev,
					   fail_bad_data) < 0)

		return;

	if (!mrc_cache_needs_update(&latest_rdev, new_md, new_data_size)) {
		printk(BIOS_DEBUG, "MRC: '%s' does not need update.\n", cr->name);
		log_event_cache_update(cr->elog_slot, ALREADY_UPTODATE);
		return;
	}

	printk(BIOS_DEBUG, "MRC: cache data '%s' needs update.\n", cr->name);

	/* Write header and data as one region_file update (two scatter
	 * entries). */
	struct update_region_file_entry entries[] = {
		[0] = {
			.size = sizeof(*new_md),
			.data = new_md,
		},
		[1] = {
			.size = new_data_size,
			.data = new_data,
		},
	};
	if (region_file_update_data_arr(&cache_file, entries, ARRAY_SIZE(entries)) < 0) {
		printk(BIOS_ERR, "MRC: failed to update '%s'.\n", cr->name);
		log_event_cache_update(cr->elog_slot, UPDATE_FAILURE);
	} else {
		printk(BIOS_DEBUG, "MRC: updated '%s'.\n", cr->name);
		log_event_cache_update(cr->elog_slot, UPDATE_SUCCESS);
		/* Keep the TPM-stored hash in sync with the new data. */
		hash_idx = cr->tpm_hash_index;
		if (hash_idx && CONFIG(MRC_SAVE_HASH_IN_TPM))
			mrc_cache_update_hash(hash_idx, new_data, new_data_size);
	}
}
514 
515 /* Read flash status register to determine if write protect is active */
nvm_is_write_protected(void)516 static int nvm_is_write_protected(void)
517 {
518 	u8 sr1;
519 	u8 wp_gpio;
520 	u8 wp_spi;
521 
522 	if (!CONFIG(CHROMEOS))
523 		return 0;
524 
525 	if (!CONFIG(BOOT_DEVICE_SPI_FLASH))
526 		return 0;
527 
528 	/* Read Write Protect GPIO if available */
529 	wp_gpio = get_write_protect_state();
530 
531 	/* Read Status Register 1 */
532 	if (spi_flash_status(boot_device_spi_flash(), &sr1) < 0) {
533 		printk(BIOS_ERR, "Failed to read SPI status register 1\n");
534 		return -1;
535 	}
536 	wp_spi = !!(sr1 & 0x80);
537 
538 	printk(BIOS_DEBUG, "SPI flash protection: WPSW=%d SRP0=%d\n",
539 		wp_gpio, wp_spi);
540 
541 	return wp_gpio && wp_spi;
542 }
543 
544 /* Apply protection to a range of flash */
nvm_protect(const struct region * r)545 static int nvm_protect(const struct region *r)
546 {
547 	if (!CONFIG(MRC_SETTINGS_PROTECT))
548 		return 0;
549 
550 	if (!CONFIG(BOOT_DEVICE_SPI_FLASH))
551 		return 0;
552 
553 	return spi_flash_ctrlr_protect_region(boot_device_spi_flash(), r, WRITE_PROTECT);
554 }
555 
/* Protect mrc region with a Protected Range Register. The PRR is only set
 * when the flash is already hardware write-protected; otherwise it would
 * be ineffective. Returns 0 on success or intentional skip, -1 on error. */
static int protect_mrc_cache(const char *name)
{
	struct region region;

	if (!CONFIG(MRC_SETTINGS_PROTECT))
		return 0;

	if (lookup_region_by_name(name, &region) < 0) {
		printk(BIOS_INFO, "MRC: Could not find region '%s'\n", name);
		return -1;
	}

	if (nvm_is_write_protected() <= 0) {
		printk(BIOS_INFO, "MRC: NOT enabling PRR for '%s'.\n", name);
		return 0;
	}

	if (nvm_protect(&region) < 0) {
		printk(BIOS_ERR, "MRC: ERROR setting PRR for '%s'.\n", name);
		return -1;
	}

	printk(BIOS_INFO, "MRC: Enabled Protected Range on '%s'.\n", name);
	return 0;
}
582 
/* Protect all mrc_cache regions, preferring a single unified PRR. */
static void protect_mrc_region(void)
{
	/*
	 * Check if there is a single unified region that encompasses both
	 * RECOVERY_MRC_CACHE and DEFAULT_MRC_CACHE. In that case protect the
	 * entire region using a single PRR.
	 *
	 * If we are not able to protect the entire region, try protecting
	 * individual regions next.
	 */
	if (protect_mrc_cache(UNIFIED_MRC_CACHE) == 0)
		return;

	if (CONFIG(HAS_RECOVERY_MRC_CACHE))
		protect_mrc_cache(RECOVERY_MRC_CACHE);

	protect_mrc_cache(DEFAULT_MRC_CACHE);
}
601 
/*
 * In recovery mode (with the retrain switch set, where applicable),
 * invalidate the normal-mode training cache so the next normal boot is
 * forced to retrain. Invalidation is done by pushing an update smaller
 * than a metadata header containing an invalid signature.
 */
static void invalidate_normal_cache(void)
{
	struct region_file cache_file;
	struct region_device rdev;
	const char *name = DEFAULT_MRC_CACHE;

	/*
	 * If !HAS_RECOVERY_MRC_CACHE and VBOOT_STARTS_IN_ROMSTAGE is
	 * selected, this means that memory training occurs before
	 * verified boot (in RO), so normal mode cache does not need
	 * to be invalidated.
	 */
	if (!CONFIG(HAS_RECOVERY_MRC_CACHE) && CONFIG(VBOOT_STARTS_IN_ROMSTAGE))
		return;

	/* We only invalidate the normal cache in recovery mode. */
	if (!vboot_recovery_mode_enabled())
		return;

	/*
	 * For platforms with a recovery mrc_cache, no need to
	 * invalidate when retrain switch is not set.
	 */
	if (CONFIG(HAS_RECOVERY_MRC_CACHE) && !get_recovery_mode_retrain_switch())
		return;

	if (fmap_locate_area_as_rdev_rw(name, &rdev) < 0) {
		printk(BIOS_ERR, "MRC: Couldn't find '%s' region. Invalidation failed\n",
			name);
		return;
	}

	if (region_file_init(&cache_file, &rdev) < 0) {
		printk(BIOS_ERR, "MRC: region file invalid for '%s'. Invalidation failed\n",
			name);
		return;
	}

	/* Push an update that consists of 4 bytes that is smaller than the
	 * MRC metadata as well as an invalid signature. */
	if (region_file_update_data(&cache_file, &mrc_invalid_sig,
				    sizeof(mrc_invalid_sig)) < 0)
		printk(BIOS_ERR, "MRC: invalidation failed for '%s'.\n", name);
}
646 
update_mrc_cache_from_cbmem(int type)647 static void update_mrc_cache_from_cbmem(int type)
648 {
649 	const struct cache_region *cr;
650 	struct region region;
651 	const struct cbmem_entry *to_be_updated;
652 
653 	cr = lookup_region(&region, type);
654 
655 	if (cr == NULL) {
656 		printk(BIOS_INFO, "MRC: could not find cache_region type %d\n", type);
657 		return;
658 	}
659 
660 	to_be_updated = cbmem_entry_find(cr->cbmem_id);
661 
662 	if (to_be_updated == NULL) {
663 		printk(BIOS_INFO, "MRC: No data in cbmem for '%s'.\n",
664 		       cr->name);
665 		return;
666 	}
667 
668 	update_mrc_cache_by_type(type,
669 				 /* pointer to mrc_cache entry metadata header */
670 				 cbmem_entry_start(to_be_updated),
671 				 /* pointer to start of mrc_cache entry data */
672 				 cbmem_entry_start(to_be_updated) +
673 					sizeof(struct mrc_metadata),
674 				 /* size of just data portion of the entry */
675 				 cbmem_entry_size(to_be_updated) -
676 					sizeof(struct mrc_metadata));
677 }
678 
finalize_mrc_cache(void * unused)679 static void finalize_mrc_cache(void *unused)
680 {
681 	if (CONFIG(MRC_STASH_TO_CBMEM)) {
682 		update_mrc_cache_from_cbmem(MRC_TRAINING_DATA);
683 
684 		if (CONFIG(MRC_SETTINGS_VARIABLE_DATA))
685 			update_mrc_cache_from_cbmem(MRC_VARIABLE_DATA);
686 	}
687 
688 	invalidate_normal_cache();
689 
690 	protect_mrc_region();
691 }
692 
/*
 * Save freshly generated MRC data of the given type/version. Builds the
 * mrc_metadata header (sizes and xxh32 hashes) and either stages
 * header + data in cbmem for ramstage to write out later
 * (MRC_STASH_TO_CBMEM) or writes it to the mrc_cache region immediately.
 * Returns 0 on success (including the benign no-region case), -1 only
 * when the cbmem allocation fails.
 */
int mrc_cache_stash_data(int type, uint32_t version, const void *data,
			 size_t size)
{
	const struct cache_region *cr;

	struct mrc_metadata md = {
		.signature = MRC_DATA_SIGNATURE,
		.data_size = size,
		.version = version,
		.data_hash = xxh32(data, size, 0),
	};
	/* Header hash is computed with the field itself still zero. */
	md.header_hash = xxh32(&md, sizeof(md), 0);

	if (CONFIG(MRC_STASH_TO_CBMEM)) {
		/* Store data in cbmem for use in ramstage */
		struct mrc_metadata *cbmem_md;
		size_t cbmem_size;
		cbmem_size = sizeof(*cbmem_md) + size;

		cr = lookup_region_type(type);
		if (cr == NULL) {
			printk(BIOS_INFO, "MRC: No region type found. Skip adding to cbmem for type %d.\n",
				type);
			return 0;
		}

		cbmem_md = cbmem_add(cr->cbmem_id, cbmem_size);

		if (cbmem_md == NULL) {
			printk(BIOS_ERR, "MRC: failed to add '%s' to cbmem.\n",
			       cr->name);
			return -1;
		}

		memcpy(cbmem_md, &md, sizeof(*cbmem_md));
		/* cbmem_md + 1 is the pointer to the mrc_cache data */
		memcpy(cbmem_md + 1, data, size);
	} else {
		/* Otherwise store to mrc_cache right away */
		update_mrc_cache_by_type(type, &md, data, size);
	}
	return 0;
}
736 
/*
 * Ensures MRC training data is stored into SPI after PCI enumeration is done.
 * Some implementations may require this to be later than others; with
 * MRC_WRITE_NV_LATE the write is deferred until just before OS resume check.
 */
#if CONFIG(MRC_WRITE_NV_LATE)
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME_CHECK, BS_ON_ENTRY, finalize_mrc_cache, NULL);
#else
BOOT_STATE_INIT_ENTRY(BS_DEV_ENUMERATE, BS_ON_EXIT, finalize_mrc_cache, NULL);
#endif
746