xref: /aosp_15_r20/external/coreboot/src/lib/region_file.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 #include <commonlib/helpers.h>
4 #include <console/console.h>
5 #include <region_file.h>
6 #include <string.h>
7 
8 /*
9  * A region file provides generic support for appending new data
10  * within a storage region. The book keeping is tracked in metadata
11  * blocks where an offset pointer points to the last byte of a newly
12  * allocated byte sequence. Thus, by taking 2 block offsets one can
13  * determine start and size of the latest update. The data does not
14  * have to be the same consistent size, but the data size has to be small
15  * enough to fit a metadata block and one data write within the region.
16  *
17  * The granularity of the block offsets are 16 bytes. By using 16-bit
18  * block offsets a region's total size can be no larger than 1MiB.
19  * However, the last 32 bytes cannot be used in the 1MiB maximum region
20  * because one needs to put a block offset indicating last byte written.
21  * An unused block offset is the value 0xffff or 0xffff0 bytes. The last
22  * block offset that can be written is 0xfffe or 0xfffe0 byte offset.
23  *
24  * The goal of this library is to provide a simple mechanism for
25  * allocating blocks of data for updates. The metadata is written first
26  * followed by the data. That means a power event between the block offset
27  * write and the data write results in blocks being allocated but not
28  * entirely written. It's up to the user of the library to sanity check
29  * data stored.
30  */
31 
/* Block offsets are in units of 16 bytes (1 << 4). */
#define REGF_BLOCK_SHIFT		4
#define REGF_BLOCK_GRANULARITY		(1 << REGF_BLOCK_SHIFT)
/* Metadata is allocated in single-granule (16-byte) blocks. */
#define REGF_METADATA_BLOCK_SIZE	REGF_BLOCK_GRANULARITY
/* Sentinel value of an erased, never-written 16-bit slot. */
#define REGF_UNALLOCATED_BLOCK		0xffff
/* Number of 16-bit block-offset slots held in one metadata block. */
#define REGF_UPDATES_PER_METADATA_BLOCK	\
	(REGF_METADATA_BLOCK_SIZE / sizeof(uint16_t))
38 
/* Special f->slot values encoding the file's state. A value greater
 * than RF_ONLY_METADATA is the metadata slot index of the latest
 * committed update. */
enum {
	RF_ONLY_METADATA = 0,	/* metadata allocated, no data written yet */
	RF_EMPTY = -1,		/* region erased; nothing allocated */
	RF_NEED_TO_EMPTY = -2,	/* inconsistent contents; erase required */
	RF_FATAL = -3,		/* unrecoverable I/O error */
};
45 
/* One metadata block: an array of 16-bit end-of-data block offsets,
 * one per update. */
struct metadata_block {
	uint16_t blocks[REGF_UPDATES_PER_METADATA_BLOCK];
};
49 
block_to_bytes(uint16_t offset)50 static size_t block_to_bytes(uint16_t offset)
51 {
52 	return (size_t)offset << REGF_BLOCK_SHIFT;
53 }
54 
bytes_to_block(size_t bytes)55 static size_t bytes_to_block(size_t bytes)
56 {
57 	return bytes >> REGF_BLOCK_SHIFT;
58 }
59 
block_offset_unallocated(uint16_t offset)60 static inline int block_offset_unallocated(uint16_t offset)
61 {
62 	return offset == REGF_UNALLOCATED_BLOCK;
63 }
64 
region_file_data_begin(const struct region_file * f)65 static inline size_t region_file_data_begin(const struct region_file *f)
66 {
67 	return f->data_blocks[0];
68 }
69 
region_file_data_end(const struct region_file * f)70 static inline size_t region_file_data_end(const struct region_file *f)
71 {
72 	return f->data_blocks[1];
73 }
74 
all_block_offsets_unallocated(const struct metadata_block * mb)75 static int all_block_offsets_unallocated(const struct metadata_block *mb)
76 {
77 	size_t i;
78 
79 	for (i = 0; i < ARRAY_SIZE(mb->blocks); i++) {
80 		if (!block_offset_unallocated(mb->blocks[i]))
81 			return 0;
82 	}
83 
84 	return 1;
85 }
86 
87 /* Read metadata block at block i. */
read_mb(size_t i,struct metadata_block * mb,const struct region_file * f)88 static int read_mb(size_t i, struct metadata_block *mb,
89 			const struct region_file *f)
90 {
91 	size_t offset = block_to_bytes(i);
92 
93 	if (rdev_readat(&f->metadata, mb, offset, sizeof(*mb)) < 0)
94 		return -1;
95 
96 	return 0;
97 }
98 
/*
 * Locate the metadata block holding the latest update.
 *
 * Binary search over num_mb_blocks metadata blocks: slots are filled in
 * order, so fully-unallocated blocks sit at the high end of the range.
 * Invariant: block l has at least one allocated slot (block 0 always
 * does, since the caller verified blocks[0]); block r is known or
 * assumed empty.
 *
 * On success, *mb holds the winning block, f->slot is set to that
 * block's first slot index, and 0 is returned. Returns -1 on I/O error.
 */
static int find_latest_mb(struct metadata_block *mb, size_t num_mb_blocks,
				struct region_file *f)
{
	size_t l = 0;
	size_t r = num_mb_blocks;

	while (l + 1 < r) {
		size_t mid = (l + r) / 2;

		if (read_mb(mid, mb, f) < 0)
			return -1;
		if (all_block_offsets_unallocated(mb))
			r = mid;
		else
			l = mid;
	}

	/* Set the base block slot. */
	f->slot = l * REGF_UPDATES_PER_METADATA_BLOCK;

	/* Re-read metadata block with the latest update. */
	if (read_mb(l, mb, f) < 0)
		return -1;

	return 0;
}
126 
find_latest_slot(struct metadata_block * mb,struct region_file * f)127 static void find_latest_slot(struct metadata_block *mb, struct region_file *f)
128 {
129 	size_t i;
130 
131 	for (i = REGF_UPDATES_PER_METADATA_BLOCK - 1; i > 0; i--) {
132 		if (!block_offset_unallocated(mb->blocks[i]))
133 			break;
134 	}
135 
136 	f->slot += i;
137 }
138 
/*
 * Derive f->data_blocks[] (begin/end block offsets of the latest data
 * update) from the slot found by find_latest_slot() and validate them.
 *
 * Returns -1 only for true I/O errors. Inconsistent metadata instead
 * sets f->slot = RF_NEED_TO_EMPTY and returns 0 so the caller can
 * recover by erasing the region.
 */
static int fill_data_boundaries(struct region_file *f)
{
	struct region_device slots;
	size_t offset;
	size_t size = sizeof(f->data_blocks);

	/* No data written yet: both boundaries sit at the end of metadata. */
	if (f->slot == RF_ONLY_METADATA) {
		size_t start = bytes_to_block(region_device_sz(&f->metadata));
		f->data_blocks[0] = start;
		f->data_blocks[1] = start;
		return 0;
	}

	/* Sanity check the 2 slot sequence to read. If it's out of the
	 * metadata blocks' bounds then one needs to empty it. This is done
	 * to uniquely identify I/O vs data errors in the readat() below. */
	offset = (f->slot - 1) * sizeof(f->data_blocks[0]);
	if (rdev_chain(&slots, &f->metadata, offset, size)) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	if (rdev_readat(&slots, &f->data_blocks, 0, size) < 0) {
		printk(BIOS_ERR, "REGF failed to read data boundaries.\n");
		return -1;
	}

	/* All used blocks should be incrementing from previous write. */
	if (region_file_data_begin(f) >= region_file_data_end(f)) {
		printk(BIOS_ERR, "REGF data boundaries wrong. [%zd,%zd) Need to empty.\n",
			region_file_data_begin(f), region_file_data_end(f));
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Ensure data doesn't exceed the region. */
	if (region_file_data_end(f) >
		bytes_to_block(region_device_sz(&f->rdev))) {
		printk(BIOS_ERR, "REGF data exceeds region %zd > %zd\n",
			region_file_data_end(f),
			bytes_to_block(region_device_sz(&f->rdev)));
		f->slot = RF_NEED_TO_EMPTY;
	}

	return 0;
}
185 
/*
 * Initialize a region file handle over parent region device p,
 * discovering the existing metadata/data layout.
 *
 * Returns 0 when a usable state was determined (which includes the
 * RF_EMPTY and RF_NEED_TO_EMPTY states recorded in f->slot); returns
 * -1 on unrecoverable I/O errors, leaving f->slot = RF_FATAL.
 */
int region_file_init(struct region_file *f, const struct region_device *p)
{
	struct metadata_block mb;

	/* Total number of metadata blocks is found by reading the first
	 * block offset as the metadata is allocated first. At least one
	 * metadata block is available. */

	memset(f, 0, sizeof(*f));
	f->slot = RF_FATAL;

	/* Keep parent around for accessing data later. */
	if (rdev_chain_full(&f->rdev, p))
		return -1;

	if (rdev_readat(p, &mb, 0, sizeof(mb)) < 0) {
		printk(BIOS_ERR, "REGF fail reading first metadata block.\n");
		return -1;
	}

	/* No metadata has been allocated. Assume region is empty. */
	if (block_offset_unallocated(mb.blocks[0])) {
		f->slot = RF_EMPTY;
		return 0;
	}

	/* If metadata block is 0 in size then need to empty. */
	if (mb.blocks[0] == 0) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* The region needs to be emptied as the metadata is broken. */
	if (rdev_chain(&f->metadata, p, 0, block_to_bytes(mb.blocks[0]))) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Locate latest metadata block with latest update. */
	if (find_latest_mb(&mb, mb.blocks[0], f)) {
		printk(BIOS_ERR, "REGF fail locating latest metadata block.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	find_latest_slot(&mb, f);

	/* Fill in the data blocks marking the latest update. */
	if (fill_data_boundaries(f)) {
		printk(BIOS_ERR, "REGF fail locating data boundaries.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	return 0;
}
242 
region_file_data(const struct region_file * f,struct region_device * rdev)243 int region_file_data(const struct region_file *f, struct region_device *rdev)
244 {
245 	size_t offset;
246 	size_t size;
247 
248 	/* Slot indicates if any data is available. */
249 	if (f->slot <= RF_ONLY_METADATA)
250 		return -1;
251 
252 	offset = block_to_bytes(region_file_data_begin(f));
253 	size = block_to_bytes(region_file_data_end(f)) - offset;
254 
255 	return rdev_chain(rdev, &f->rdev, offset, size);
256 }
257 
/*
 * Allocate enough metadata blocks to maximize data updates. Do this in
 * terms of blocks. To solve the balance of metadata vs data, 2 linear
 * equations are solved in terms of blocks where 'x' is number of
 * data updates and 'y' is number of metadata blocks:
 *
 *   x = number of data updates
 *   y = number of metadata blocks
 *   T = total blocks in region
 *   D = data size in blocks
 *   M = metadata size in blocks
 *   A = updates accounted for in each metadata block
 *
 *   T = D * x + M * y
 *   y = x / A
 *   -----------------
 *   T = D * x + M * x / A = x * (D + M / A)
 *   T * A = x * (D * A + M)
 *   x = T * A / (D * A + M)
 *
 * Returns 0 on success with f->metadata chained over the allocation
 * and f->data_blocks[] reset; -1 if the update cannot fit or on I/O
 * error.
 */
static int allocate_metadata(struct region_file *f, size_t data_blks)
{
	size_t t, m;
	size_t x, y;
	uint16_t tot_metadata;
	const size_t a = REGF_UPDATES_PER_METADATA_BLOCK;
	const size_t d = data_blks;

	t = bytes_to_block(ALIGN_DOWN(region_device_sz(&f->rdev),
					REGF_BLOCK_GRANULARITY));
	m = bytes_to_block(ALIGN_UP(REGF_METADATA_BLOCK_SIZE,
					REGF_BLOCK_GRANULARITY));

	/* Ensure at least one data update can fit with 1 metadata block
	 * within the region. */
	if (d > t - m)
		return -1;

	/* Maximize number of updates by aligning up to the number updates in
	 * a metadata block. May not really be able to achieve the number of
	 * updates in practice, but it ensures enough metadata blocks are
	 * allocated. */
	x = ALIGN_UP(t * a / (d * a + m), a);

	/* One data block has to fit. */
	if (x == 0)
		x = 1;

	/* Now calculate how many metadata blocks are needed. */
	y = ALIGN_UP(x, a) / a;

	/* Need to commit the metadata allocation. The first slot holds the
	 * total metadata size in blocks; region_file_init() relies on it. */
	tot_metadata = m * y;
	if (rdev_writeat(&f->rdev, &tot_metadata, 0, sizeof(tot_metadata)) < 0)
		return -1;

	if (rdev_chain(&f->metadata, &f->rdev, 0,
				block_to_bytes(tot_metadata)))
		return -1;

	/* Initialize a 0 data block to start appending from. */
	f->data_blocks[0] = tot_metadata;
	f->data_blocks[1] = tot_metadata;

	return 0;
}
324 
update_can_fit(const struct region_file * f,size_t data_blks)325 static int update_can_fit(const struct region_file *f, size_t data_blks)
326 {
327 	size_t metadata_slots;
328 	size_t end_blk;
329 
330 	metadata_slots = region_device_sz(&f->metadata) / sizeof(uint16_t);
331 
332 	/* No more slots. */
333 	if ((size_t)f->slot + 1 >= metadata_slots)
334 		return 0;
335 
336 	/* See where the last block lies from the current one. */
337 	end_blk = data_blks + region_file_data_end(f);
338 
339 	/* Update would have exceeded block addressing. */
340 	if (end_blk >= REGF_UNALLOCATED_BLOCK)
341 		return 0;
342 
343 	/* End block exceeds size of region. */
344 	if (end_blk > bytes_to_block(region_device_sz(&f->rdev)))
345 		return 0;
346 
347 	return 1;
348 }
349 
commit_data_allocation(struct region_file * f,size_t data_blks)350 static int commit_data_allocation(struct region_file *f, size_t data_blks)
351 {
352 	size_t offset;
353 
354 	f->slot++;
355 
356 	offset = f->slot * sizeof(uint16_t);
357 	f->data_blocks[0] = region_file_data_end(f);
358 	f->data_blocks[1] = region_file_data_begin(f) + data_blks;
359 
360 	if (rdev_writeat(&f->metadata, &f->data_blocks[1], offset,
361 				sizeof(f->data_blocks[1])) < 0)
362 		return -1;
363 
364 	return 0;
365 }
366 
commit_data(const struct region_file * f,const struct update_region_file_entry * entries,size_t num_entries)367 static int commit_data(const struct region_file *f,
368 		       const struct update_region_file_entry *entries,
369 		       size_t num_entries)
370 {
371 	size_t offset = block_to_bytes(region_file_data_begin(f));
372 	for (int i = 0; i < num_entries; i++) {
373 		if (rdev_writeat(&f->rdev, entries[i].data, offset, entries[i].size) < 0)
374 			return -1;
375 		offset += entries[i].size;
376 	}
377 	return 0;
378 }
379 
handle_empty(struct region_file * f,size_t data_blks)380 static int handle_empty(struct region_file *f, size_t data_blks)
381 {
382 	if (allocate_metadata(f, data_blks)) {
383 		printk(BIOS_ERR, "REGF metadata allocation failed: %zd data blocks %zd total blocks\n",
384 			data_blks, bytes_to_block(region_device_sz(&f->rdev)));
385 		return -1;
386 	}
387 
388 	f->slot = RF_ONLY_METADATA;
389 
390 	return 0;
391 }
392 
handle_need_to_empty(struct region_file * f)393 static int handle_need_to_empty(struct region_file *f)
394 {
395 	if (rdev_eraseat(&f->rdev, 0, region_device_sz(&f->rdev)) < 0) {
396 		printk(BIOS_ERR, "REGF empty failed.\n");
397 		return -1;
398 	}
399 
400 	f->slot = RF_EMPTY;
401 
402 	return 0;
403 }
404 
handle_update(struct region_file * f,size_t blocks,const struct update_region_file_entry * entries,size_t num_entries)405 static int handle_update(struct region_file *f, size_t blocks,
406 			 const struct update_region_file_entry *entries,
407 			 size_t num_entries)
408 {
409 	if (!update_can_fit(f, blocks)) {
410 		printk(BIOS_INFO, "REGF update can't fit. Will empty.\n");
411 		f->slot = RF_NEED_TO_EMPTY;
412 		return 0;
413 	}
414 
415 	if (commit_data_allocation(f, blocks)) {
416 		printk(BIOS_ERR, "REGF failed to commit data allocation.\n");
417 		return -1;
418 	}
419 
420 	if (commit_data(f, entries, num_entries)) {
421 		printk(BIOS_ERR, "REGF failed to commit data.\n");
422 		return -1;
423 	}
424 
425 	return 0;
426 }
427 
region_file_update_data_arr(struct region_file * f,const struct update_region_file_entry * entries,size_t num_entries)428 int region_file_update_data_arr(struct region_file *f,
429 				const struct update_region_file_entry *entries,
430 				size_t num_entries)
431 {
432 	int ret;
433 	size_t blocks;
434 	size_t size = 0;
435 
436 	for (int i = 0; i < num_entries; i++)
437 		size += entries[i].size;
438 	blocks = bytes_to_block(ALIGN_UP(size, REGF_BLOCK_GRANULARITY));
439 
440 	while (1) {
441 		int prev_slot = f->slot;
442 
443 		switch (f->slot) {
444 		case RF_EMPTY:
445 			ret = handle_empty(f, blocks);
446 			break;
447 		case RF_NEED_TO_EMPTY:
448 			ret = handle_need_to_empty(f);
449 			break;
450 		case RF_FATAL:
451 			ret = -1;
452 			break;
453 		default:
454 			ret = handle_update(f, blocks, entries, num_entries);
455 			break;
456 		}
457 
458 		/* Failing case. No more updates allowed to be attempted. */
459 		if (ret) {
460 			f->slot = RF_FATAL;
461 			break;
462 		}
463 
464 		/* No more state changes and data committed. */
465 		if (f->slot > RF_ONLY_METADATA && prev_slot != f->slot)
466 			break;
467 	}
468 
469 	return ret;
470 }
471 
region_file_update_data(struct region_file * f,const void * buf,size_t size)472 int region_file_update_data(struct region_file *f, const void *buf, size_t size)
473 {
474 	struct update_region_file_entry entry = {
475 		.size = size,
476 		.data = buf,
477 	};
478 	return region_file_update_data_arr(f, &entry, 1);
479 }
480