/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpi.h>
#include <boot_device.h>
#include <bootstate.h>
#include <cbmem.h>
#include <commonlib/bsd/bcd.h>
#include <commonlib/bsd/elog.h>
#include <commonlib/region.h>
#include <console/console.h>
#include <elog.h>
#include <fmap.h>
#include <lib.h>
#include <post.h>
#include <rtc.h>
#include <smbios.h>
#include <stdint.h>
#include <string.h>
#include <timestamp.h>

#define ELOG_MIN_AVAILABLE_ENTRIES	2  /* Shrink when this many can't fit */
#define ELOG_SHRINK_PERCENTAGE		25 /* Percent of total area to remove */

#if CONFIG(ELOG_DEBUG)
#define elog_debug(STR...) printk(BIOS_DEBUG, STR)
#else
#define elog_debug(STR...)
#endif

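/*
 * Sentinel value for nv_last_write marking that the NV region must be
 * erased before the next sync to non-volatile storage.
 */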
#define NV_NEEDS_ERASE (~(size_t)0)
enum elog_init_state {
	ELOG_UNINITIALIZED = 0,
	ELOG_INITIALIZED,
	ELOG_BROKEN,
};

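/*
 * Runtime bookkeeping for the event log: shrink thresholds, write cursors
 * for the in-memory mirror and the non-volatile backing store, and the
 * region devices backing each of them.
 */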
struct elog_state {
	u16 full_threshold;
	u16 shrink_size;

	/*
	 * The non-volatile storage chases the mirrored copy. When nv_last_write
	 * is less than the mirrored last write the non-volatile storage needs
	 * to be updated.
	 */
	size_t mirror_last_write;
	size_t nv_last_write;

	struct region_device nv_dev;
	/* Device that mirrors the eventlog in memory. */
	struct region_device mirror_dev;

	enum elog_init_state elog_initialized;
};

static struct elog_state elog_state;

#define ELOG_SIZE (4 * KiB)
static uint8_t elog_mirror_buf[ELOG_SIZE];

static inline struct region_device *mirror_dev_get(void)
{
	return &elog_state.mirror_dev;
}

static size_t elog_events_start(void)
{
	/* Events are added directly after the header. */
	return sizeof(struct elog_header);
}

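/* Total space available for events, i.e. the backing store minus the header. */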
static size_t elog_events_total_space(void)
{
	return region_device_sz(&elog_state.nv_dev) - elog_events_start();
}

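/* Map and unmap event buffers within the in-memory mirror. */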
static struct event_header *elog_get_event_buffer(size_t offset, size_t size)
{
	return rdev_mmap(mirror_dev_get(), offset, size);
}

static struct event_header *elog_get_next_event_buffer(size_t size)
{
	elog_debug("ELOG: new event at offset 0x%zx\n",
		   elog_state.mirror_last_write);
	return elog_get_event_buffer(elog_state.mirror_last_write, size);
}

static void elog_put_event_buffer(struct event_header *event)
{
	rdev_munmap(mirror_dev_get(), event);
}

static size_t elog_mirror_reset_last_write(void)
{
	/* Return previous write value. */
	size_t prev = elog_state.mirror_last_write;

	elog_state.mirror_last_write = 0;
	return prev;
}

static void elog_mirror_increment_last_write(size_t size)
{
	elog_state.mirror_last_write += size;
}

static void elog_nv_reset_last_write(void)
{
	elog_state.nv_last_write = 0;
}

static void elog_nv_increment_last_write(size_t size)
{
	elog_state.nv_last_write += size;
}

static void elog_nv_needs_possible_erase(void)
{
	/* If last write is 0 it means it is already erased. */
	if (elog_state.nv_last_write != 0)
		elog_state.nv_last_write = NV_NEEDS_ERASE;
}

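/* The log is full once the mirror write cursor reaches the full threshold. */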
static bool elog_should_shrink(void)
{
	return elog_state.mirror_last_write >= elog_state.full_threshold;
}

static bool elog_nv_needs_erase(void)
{
	return elog_state.nv_last_write == NV_NEEDS_ERASE;
}

static bool elog_nv_needs_update(void)
{
	return elog_state.nv_last_write != elog_state.mirror_last_write;
}

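/* Size (and starting offset) of the mirrored data not yet written to NV. */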
static size_t elog_nv_region_to_update(size_t *offset)
{
	*offset = elog_state.nv_last_write;
	return elog_state.mirror_last_write - elog_state.nv_last_write;
}

/*
 * When parsing state from the NV one needs to adjust both the NV and mirror
 * write state. Therefore, provide helper functions which adjust both
 * at the same time.
 */
static void elog_tandem_reset_last_write(void)
{
	elog_mirror_reset_last_write();
	elog_nv_reset_last_write();
}

static void elog_tandem_increment_last_write(size_t size)
{
	elog_mirror_increment_last_write(size);
	elog_nv_increment_last_write(size);
}

static void elog_debug_dump_buffer(const char *msg)
{
	struct region_device *rdev;
	void *buffer;

	if (!CONFIG(ELOG_DEBUG))
		return;

	elog_debug("%s", msg);

	rdev = mirror_dev_get();

	buffer = rdev_mmap_full(rdev);

	if (buffer == NULL)
		return;

	hexdump(buffer, region_device_sz(rdev));

	rdev_munmap(rdev, buffer);
}

/*
 * Check if mirrored buffer is filled with ELOG_TYPE_EOL byte from the
 * provided offset to the end of the mirrored buffer.
 */
static int elog_is_buffer_clear(size_t offset)
{
	size_t i;
	const struct region_device *rdev = mirror_dev_get();
	size_t size = region_device_sz(rdev) - offset;
	uint8_t *buffer = rdev_mmap(rdev, offset, size);
	int ret = 1;

	elog_debug("%s(offset=%zu size=%zu)\n", __func__, offset, size);

	if (buffer == NULL)
		return 0;

	for (i = 0; i < size; i++) {
		if (buffer[i] != ELOG_TYPE_EOL) {
			ret = 0;
			break;
		}
	}
	rdev_munmap(rdev, buffer);
	return ret;
}

/*
 * Verify if the mirrored elog structure is valid.
 * Returns 1 if the header is valid, 0 otherwise
 */
static int elog_is_header_valid(void)
{
	struct elog_header *header;

	elog_debug("%s()\n", __func__);

	header = rdev_mmap(mirror_dev_get(), 0, sizeof(*header));

	if (elog_verify_header(header) != CB_SUCCESS) {
		printk(BIOS_ERR, "ELOG: failed to verify header.\n");
		return 0;
	}
	return 1;
}

/*
 * Validate the event header and data.
 */
static size_t elog_is_event_valid(size_t offset)
{
	uint8_t checksum;
	struct event_header *event;
	uint8_t len;
	const size_t len_offset = offsetof(struct event_header, length);
	const size_t size = sizeof(len);

	/* Read and validate length. */
	if (rdev_readat(mirror_dev_get(), &len, offset + len_offset, size) < 0)
		return 0;

	/* Event length must be at least header size + checksum */
	if (len < (sizeof(*event) + sizeof(checksum)))
		return 0;

	if (len > ELOG_MAX_EVENT_SIZE)
		return 0;

	event = elog_get_event_buffer(offset, len);
	if (!event)
		return 0;

	/* If event checksum is invalid the area is corrupt */
	checksum = elog_checksum_event(event);
	elog_put_event_buffer(event);

	if (checksum != 0)
		return 0;

	/* Event is valid */
	return len;
}

/*
 * Write 'size' bytes of data from provided 'offset' in the mirrored elog to
 * the flash backing store. This will not erase the flash and it assumes the
 * flash area has been erased appropriately.
 */
static void elog_nv_write(size_t offset, size_t size)
{
	void *address;
	const struct region_device *rdev = mirror_dev_get();
	if (!size)
		return;

	address = rdev_mmap(rdev, offset, size);

	elog_debug("%s(address=%p offset=0x%08zx size=%zu)\n", __func__,
		 address, offset, size);

	if (address == NULL)
		return;

	/* Write the data to flash */
	if (rdev_writeat(&elog_state.nv_dev, address, offset, size) != size)
		printk(BIOS_ERR, "ELOG: NV Write failed at 0x%zx, size 0x%zx\n",
			offset, size);

	rdev_munmap(rdev, address);
}

/*
 * Erase the entire NV region backing the event log.
 */
static void elog_nv_erase(void)
{
	size_t size = region_device_sz(&elog_state.nv_dev);
	elog_debug("%s()\n", __func__);

	/* Erase the sectors in this region */
	if (rdev_eraseat(&elog_state.nv_dev, 0, size) != size)
		printk(BIOS_ERR, "ELOG: erase failure.\n");
}

/*
 * Scan the event area and validate each entry and update the ELOG state.
 */
static int elog_update_event_buffer_state(void)
{
	size_t offset = elog_events_start();

	elog_debug("%s()\n", __func__);

	/* Go through each event and validate it */
	while (1) {
		uint8_t type;
		const size_t type_offset = offsetof(struct event_header, type);
		size_t len;
		const size_t size = sizeof(type);

		if (rdev_readat(mirror_dev_get(), &type,
				offset + type_offset, size) < 0) {
			return -1;
		}

		/* The end of the event marker has been found */
		if (type == ELOG_TYPE_EOL)
			break;

		/* Validate the event */
		len = elog_is_event_valid(offset);

		if (!len) {
			printk(BIOS_ERR, "ELOG: Invalid event @ offset 0x%zx\n",
				offset);
			return -1;
		}

		/* Move to the next event */
		elog_tandem_increment_last_write(len);
		offset += len;
	}

	/* Ensure the remaining buffer is empty */
	if (!elog_is_buffer_clear(offset)) {
		printk(BIOS_ERR, "ELOG: buffer not cleared from 0x%zx\n",
			offset);
		return -1;
	}

	return 0;
}

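/*
 * Read the NV area into the in-memory mirror, then validate the header and
 * each event, updating the tandem write cursors along the way.
 */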
static int elog_scan_flash(void)
{
	elog_debug("%s()\n", __func__);
	void *mirror_buffer;
	const struct region_device *rdev = mirror_dev_get();

	size_t size = region_device_sz(&elog_state.nv_dev);

	/* Fill memory buffer by reading from SPI */
	mirror_buffer = rdev_mmap_full(rdev);
	if (rdev_readat(&elog_state.nv_dev, mirror_buffer, 0, size) != size) {
		rdev_munmap(rdev, mirror_buffer);
		printk(BIOS_ERR, "ELOG: NV read failure.\n");
		return -1;
	}
	rdev_munmap(rdev, mirror_buffer);

	/* No writes have been done yet. */
	elog_tandem_reset_last_write();

	/* Check if the area is empty or not */
	if (elog_is_buffer_clear(0)) {
		printk(BIOS_ERR, "ELOG: NV Buffer Cleared.\n");
		return -1;
	}

	/* Indicate that the header has possibly been written. */
	elog_tandem_increment_last_write(elog_events_start());

	/* Validate the header */
	if (!elog_is_header_valid()) {
		printk(BIOS_ERR, "ELOG: NV Buffer Invalid.\n");
		return -1;
	}

	return elog_update_event_buffer_state();
}

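/* Write a fresh elog header at the start of the in-memory mirror. */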
static void elog_write_header_in_mirror(void)
{
	static const struct elog_header header = {
		.magic = ELOG_SIGNATURE,
		.version = ELOG_VERSION,
		.header_size = sizeof(struct elog_header),
		.reserved = {
			[0] = ELOG_TYPE_EOL,
			[1] = ELOG_TYPE_EOL,
		},
	};

	rdev_writeat(mirror_dev_get(), &header, 0, sizeof(header));
	elog_mirror_increment_last_write(elog_events_start());
}

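/*
 * Move 'size' bytes of events at 'offset' to the start of the event area
 * and fill the remainder of the mirror with EOL bytes.
 */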
static void elog_move_events_to_front(size_t offset, size_t size)
{
	void *src;
	void *dest;
	size_t start_offset = elog_events_start();
	const struct region_device *rdev = mirror_dev_get();

	src = rdev_mmap(rdev, offset, size);
	dest = rdev_mmap(rdev, start_offset, size);

	if (src == NULL || dest == NULL) {
		printk(BIOS_ERR, "ELOG: failure moving events!\n");
		rdev_munmap(rdev, dest);
		rdev_munmap(rdev, src);
		return;
	}

	/* Move the events to the front. */
	memmove(dest, src, size);
	rdev_munmap(rdev, dest);
	rdev_munmap(rdev, src);

	/* Mark EOL for previously used buffer until the end. */
	offset = start_offset + size;
	size = region_device_sz(rdev) - offset;
	dest = rdev_mmap(rdev, offset, size);
	if (dest == NULL) {
		printk(BIOS_ERR, "ELOG: failure filling EOL!\n");
		return;
	}
	memset(dest, ELOG_TYPE_EOL, size);
	rdev_munmap(rdev, dest);
}

/* Perform the shrink and move events, returning the number of bytes removed. */
static size_t elog_do_shrink(size_t requested_size, size_t last_write)
{
	const struct region_device *rdev = mirror_dev_get();
	size_t offset = elog_events_start();
	size_t remaining_size;

	while (1) {
		const size_t type_offset = offsetof(struct event_header, type);
		const size_t len_offset = offsetof(struct event_header, length);
		const size_t size = sizeof(uint8_t);
		uint8_t type;
		uint8_t len;

		/* Next event has exceeded constraints */
		if (offset > requested_size)
			break;

		if (rdev_readat(rdev, &type, offset + type_offset, size) < 0)
			break;

		/* Reached the end of the area */
		if (type == ELOG_TYPE_EOL)
			break;

		if (rdev_readat(rdev, &len, offset + len_offset, size) < 0)
			break;

		offset += len;
	}

	/*
	 * Move the events and update the last write. The last write before
	 * shrinking was captured prior to resetting the counter in order to
	 * determine the actual amount of data being kept.
	 */
	remaining_size = last_write - offset;
	elog_debug("ELOG: shrinking offset: 0x%zx remaining_size: 0x%zx\n",
		offset, remaining_size);
	elog_move_events_to_front(offset, remaining_size);
	elog_mirror_increment_last_write(remaining_size);

	/* Return the amount of data removed. */
	return offset - elog_events_start();
}

/*
 * Shrink the log, deleting old entries and moving the
 * remaining ones to the front of the log.
 */
static int elog_shrink_by_size(size_t requested_size)
{
	size_t shrunk_size;
	size_t captured_last_write;
	size_t total_event_space = elog_events_total_space();

	elog_debug("%s()\n", __func__);

	/* Indicate possible erase required. */
	elog_nv_needs_possible_erase();

	/* Capture the last write to determine data size in buffer to shrink. */
	captured_last_write = elog_mirror_reset_last_write();

	/* Prepare new header. */
	elog_write_header_in_mirror();

	/* Determine if any actual shrinking is required. */
	if (requested_size >= total_event_space)
		shrunk_size = total_event_space;
	else
		shrunk_size = elog_do_shrink(requested_size,
						captured_last_write);

	/* Add clear event */
	return elog_add_event_word(ELOG_TYPE_LOG_CLEAR, shrunk_size);
}

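/* Clear the event area, leaving only a fresh header and a LOG_CLEAR event. */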
static int elog_prepare_empty(void)
{
	elog_debug("%s()\n", __func__);
	return elog_shrink_by_size(elog_events_total_space());
}

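/* Shrink the log if it has grown past the full threshold. */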
static int elog_shrink(void)
{
	if (elog_should_shrink())
		return elog_shrink_by_size(elog_state.shrink_size);
	return 0;
}

/*
 * Convert a flash offset into a memory mapped flash address
 */
static inline u8 *elog_flash_offset_to_address(void)
{
	/* Only support memory-mapped devices. */
	if (!CONFIG(BOOT_DEVICE_MEMORY_MAPPED))
		return NULL;

	if (!region_device_sz(&elog_state.nv_dev))
		return NULL;

	/* Get a view into the read-only boot device. */
	return rdev_mmap(boot_device_ro(),
			 region_device_offset(&elog_state.nv_dev),
			 region_device_sz(&elog_state.nv_dev));
}

/*
 * Fill out SMBIOS Type 15 table entry so the
 * event log can be discovered at runtime.
 */
int elog_smbios_write_type15(unsigned long *current, int handle)
{
	uintptr_t log_address;

	size_t elog_size = region_device_sz(&elog_state.nv_dev);

	if (CONFIG(ELOG_CBMEM)) {
		/* Save event log buffer into CBMEM for the OS to read */
		void *cbmem = cbmem_add(CBMEM_ID_ELOG, elog_size);
		if (cbmem)
			rdev_readat(mirror_dev_get(), cbmem, 0, elog_size);
		log_address = (uintptr_t)cbmem;
	} else {
		log_address = (uintptr_t)elog_flash_offset_to_address();
	}

	if (!log_address) {
		printk(BIOS_WARNING, "SMBIOS type 15 log address invalid.\n");
		return 0;
	}

	struct smbios_type15 *t = smbios_carve_table(*current, SMBIOS_EVENT_LOG,
						     sizeof(*t), handle);

	t->area_length = elog_size - 1;
	t->header_offset = 0;
	t->data_offset = sizeof(struct elog_header);
	t->access_method = SMBIOS_EVENTLOG_ACCESS_METHOD_MMIO32;
	t->log_status = SMBIOS_EVENTLOG_STATUS_VALID;
	t->change_token = 0;
	t->address = log_address;
	t->header_format = ELOG_HEADER_TYPE_OEM;
	t->log_type_descriptors = 0;
	t->log_type_descriptor_length = 2;

	const int len = smbios_full_table_len(&t->header, t->eos);
	*current += len;
	return len;
}

/*
 * Clear the entire event log
 */
int elog_clear(void)
{
	elog_debug("%s()\n", __func__);

	/* Make sure ELOG structures are initialized */
	if (elog_init() < 0)
		return -1;

	return elog_prepare_empty();
}

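/*
 * Locate the ELOG region in FMAP, cap it at ELOG_SIZE, and derive the full
 * threshold and shrink size from the resulting area.
 */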
static int elog_find_flash(void)
{
	size_t total_size;
	size_t reserved_space = ELOG_MIN_AVAILABLE_ENTRIES * ELOG_MAX_EVENT_SIZE;
	struct region_device *rdev = &elog_state.nv_dev;

	elog_debug("%s()\n", __func__);

	/* Find the ELOG base and size in FMAP */
	if (fmap_locate_area_as_rdev_rw(ELOG_RW_REGION_NAME, rdev) < 0) {
		printk(BIOS_WARNING, "ELOG: Unable to find RW_ELOG in FMAP\n");
		return -1;
	}

	if (region_device_sz(rdev) < ELOG_SIZE) {
		printk(BIOS_WARNING, "ELOG: Needs a minimum size of %dKiB: %zu\n",
			ELOG_SIZE / KiB, region_device_sz(rdev));
		return -1;
	}

	printk(BIOS_INFO, "ELOG: NV offset 0x%zx size 0x%zx\n",
		region_device_offset(rdev), region_device_sz(rdev));

	/* Keep 4KiB max size until large malloc()s have been fixed. */
	total_size = MIN(ELOG_SIZE, region_device_sz(rdev));
	rdev_chain(rdev, rdev, 0, total_size);

	elog_state.full_threshold = total_size - reserved_space;
	elog_state.shrink_size = total_size * ELOG_SHRINK_PERCENTAGE / 100;

	if (reserved_space > elog_state.shrink_size) {
		printk(BIOS_ERR, "ELOG: SHRINK_PERCENTAGE too small\n");
		return -1;
	}

	return 0;
}

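/*
 * Flush pending mirror updates to the NV storage, erasing the region first
 * when required, and re-scan the flash after an erase to verify the result.
 */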
static int elog_sync_to_nv(void)
{
	size_t offset;
	size_t size;
	bool erase_needed;
	/* Determine if any updates are required. */
	if (!elog_nv_needs_update())
		return 0;

	erase_needed = elog_nv_needs_erase();

	/* Erase if necessary. */
	if (erase_needed) {
		elog_nv_erase();
		elog_nv_reset_last_write();
	}

	size = elog_nv_region_to_update(&offset);

	elog_nv_write(offset, size);
	elog_nv_increment_last_write(size);

	/*
	 * If erase wasn't performed then don't rescan. Assume the appended
	 * write was successful.
	 */
	if (!erase_needed)
		return 0;

	elog_debug_dump_buffer("ELOG: in-memory mirror:\n");

	/* Mark broken if the scan failed after a sync. */
	if (elog_scan_flash() < 0) {
		printk(BIOS_ERR, "ELOG: Sync back from NV storage failed.\n");
		elog_debug_dump_buffer("ELOG: Buffer from NV:\n");
		elog_state.elog_initialized = ELOG_BROKEN;
		return -1;
	}

	return 0;
}

/*
 * Do not log boot count events in S3 resume or SMM.
 */
static bool elog_do_add_boot_count(void)
{
	if (ENV_SMM)
		return false;

	return !acpi_is_wakeup_s3();
}

/* Check and log POST codes from previous boot */
static void log_last_boot_post(void)
{
#if ENV_X86
	u8 code;
	u32 extra;

	if (!CONFIG(CMOS_POST))
		return;

	if (cmos_post_previous_boot(&code, &extra) == 0)
		return;

	printk(BIOS_WARNING, "POST: Unexpected post code/extra "
	       "in previous boot: 0x%02x/0x%04x\n", code, extra);

	elog_add_event_word(ELOG_TYPE_LAST_POST_CODE, code);
	if (extra)
		elog_add_event_dword(ELOG_TYPE_POST_EXTRA, extra);
#endif
}

static void elog_add_boot_count(void)
{
	if (elog_do_add_boot_count()) {
		elog_add_event_dword(ELOG_TYPE_BOOT, boot_count_read());

		log_last_boot_post();
	}
}

/*
 * Event log main entry point
 */
int elog_init(void)
{
	void *mirror_buffer;
	size_t elog_size;
	switch (elog_state.elog_initialized) {
	case ELOG_UNINITIALIZED:
		break;
	case ELOG_INITIALIZED:
		return 0;
	case ELOG_BROKEN:
		return -1;
	}
	elog_state.elog_initialized = ELOG_BROKEN;

	if (!ENV_SMM)
		timestamp_add_now(TS_ELOG_INIT_START);

	elog_debug("%s()\n", __func__);

	/* Set up the backing store */
	if (elog_find_flash() < 0)
		return -1;

	elog_size = region_device_sz(&elog_state.nv_dev);
	mirror_buffer = elog_mirror_buf;
	rdev_chain_mem_rw(&elog_state.mirror_dev, mirror_buffer, elog_size);

	/*
	 * Mark as initialized to allow elog_init() to be called and deemed
	 * successful in the prepare/shrink path which adds events.
	 */
	elog_state.elog_initialized = ELOG_INITIALIZED;

	/* Load the log from flash and prepare the flash if necessary. */
	if (elog_scan_flash() < 0 && elog_prepare_empty() < 0) {
		printk(BIOS_ERR, "ELOG: Unable to prepare flash\n");
		return -1;
	}

	printk(BIOS_INFO, "ELOG: area is %zu bytes, full threshold %d,"
	       " shrink size %d\n", region_device_sz(&elog_state.nv_dev),
	       elog_state.full_threshold, elog_state.shrink_size);

	if (ENV_PAYLOAD_LOADER)
		elog_add_boot_count();

	if (!ENV_SMM)
		timestamp_add_now(TS_ELOG_INIT_END);

	return 0;
}

/*
 * Add an event to the log
 */
int elog_add_event_raw(u8 event_type, void *data, u8 data_size)
{
	struct event_header *event;
	struct rtc_time time = { 0 };
	u8 event_size;

	elog_debug("%s(type=%X)\n", __func__, event_type);

	/* Make sure ELOG structures are initialized */
	if (elog_init() < 0)
		return -1;

	/* Header + Data + Checksum */
	event_size = sizeof(*event) + data_size + 1;
	if (event_size > ELOG_MAX_EVENT_SIZE) {
		printk(BIOS_ERR, "ELOG: Event(%X) data size too "
		       "big (%d)\n", event_type, event_size);
		return -1;
	}

	/* Make sure event data can fit */
	event = elog_get_next_event_buffer(event_size);
	if (event == NULL) {
		printk(BIOS_ERR, "ELOG: Event(%X) does not fit\n",
		       event_type);
		return -1;
	}

	/* Fill out event data */
	event->type = event_type;
	event->length = event_size;
	if (CONFIG(RTC))
		rtc_get(&time);

	elog_fill_timestamp(event, time.sec, time.min, time.hour,
			    time.mday, time.mon, time.year);

	if (data_size)
		memcpy(&event[1], data, data_size);

	/* Zero the checksum byte and then compute checksum */
	elog_update_checksum(event, 0);
	elog_update_checksum(event, -(elog_checksum_event(event)));
	elog_put_event_buffer(event);

	elog_mirror_increment_last_write(event_size);

	printk(BIOS_INFO, "ELOG: Event(%X) added with size %d ",
	       event_type, event_size);
	if (event->day != 0) {
		printk(BIOS_INFO, "at 20%02x-%02x-%02x %02x:%02x:%02x UTC\n",
		       event->year, event->month, event->day,
		       event->hour, event->minute, event->second);
	} else {
		printk(BIOS_INFO, "(timestamp unavailable)\n");
	}

	/* Shrink the log if we are getting too full */
	if (elog_shrink() < 0)
		return -1;

	/* Ensure the updates hit the non-volatile storage. */
	return elog_sync_to_nv();
}

int elog_add_event(u8 event_type)
{
	return elog_add_event_raw(event_type, NULL, 0);
}

int elog_add_event_byte(u8 event_type, u8 data)
{
	return elog_add_event_raw(event_type, &data, sizeof(data));
}

int elog_add_event_word(u8 event_type, u16 data)
{
	return elog_add_event_raw(event_type, &data, sizeof(data));
}

int elog_add_event_dword(u8 event_type, u32 data)
{
	return elog_add_event_raw(event_type, &data, sizeof(data));
}

int elog_add_event_wake(u8 source, u32 instance)
{
	struct elog_event_data_wake wake = {
		.source = source,
		.instance = instance
	};
	return elog_add_event_raw(ELOG_TYPE_WAKE_SOURCE, &wake, sizeof(wake));
}

int elog_add_extended_event(u8 type, u32 complement)
{
	struct elog_event_extended_event event = {
		.event_type = type,
		.event_complement = complement
	};
	return elog_add_event_raw(ELOG_TYPE_EXTENDED_EVENT,
				  &event,
				  sizeof(event));
}

/* Make sure elog_init() runs at least once to log System Boot event. */
static void elog_bs_init(void *unused) { elog_init(); }
BOOT_STATE_INIT_ENTRY(BS_POST_DEVICE, BS_ON_ENTRY, elog_bs_init, NULL);