/* Copyright 2018 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

#include "action_descriptor.h"
#include "chipdrivers.h"
#include "flash.h"
#include "layout.h"
#include "programmer.h"

/**
 * action_descriptor is DEPRECATED!
 * This flag allows it to be toggled off easily ahead of its eventual
 * removal from the tree.
 *
 * TRUE  := action_descriptor is not used.
 * FALSE := action_descriptor is used on Intel.
 */
static bool deprecate_ad = false;

/*
 * This global variable is used to communicate the type of ICH found on the
 * device. On non-Intel platforms the default value of CHIPSET_ICH_UNKNOWN
 * is used.
 */
extern enum ich_chipset ich_generation;

/*
 * Unfortunate global state.
 */
static bool dry_run = false;

/*
 * This module analyses the contents of 'before' and 'after' flash images and,
 * based on the images' differences, prepares a list of processing actions to
 * take.
 *
 * The goal is to prepare actions using the chip's erase capability in the
 * most efficient way: erasing the smallest possible portions of the chip
 * gives the highest granularity, but if many small areas need to be erased,
 * erasing a larger area, even if re-writing it completely, is more efficient.
 * The break-even point is somewhere around 60%; the code below folds smaller
 * blocks into a larger one once more than 70% of them need erasing.
 *
 * Each flash chip description in flash.c includes a set of erase command
 * descriptors, with different commands for erasing blocks of different fixed
 * sizes. Sometimes the erase command for a certain block size does not cover
 * the entire chip. This module preprocesses the flash chip description to
 * compile an array of erase commands with their block size indices such that
 * it is guaranteed that the command can be used to erase anywhere in the chip
 * where erase is required based on the differences between 'before' and
 * 'after' images.
 *
 * 'eraser_index' below is the index into the 'block_erasers' array of the
 * flash chip descriptor; it points to the function to use to erase the block
 * of a certain size.
 *
 * The erase command could potentially operate on blocks of different sizes;
 * 'region_index' is the index into the 'block_erasers.eraseblocks' array
 * which defines what block size would be used by this erase command.
 */
struct eraser {
	int eraser_index;
	int region_index;
};

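/*
 * Illustrative example (hypothetical indices, not from a real chip
 * description): if block_erasers[2] of a chip implements a 4 KiB sector
 * erase and its eraseblocks[0] entry describes those 4 KiB blocks, then
 * the pair
 *
 *	struct eraser e = { .eraser_index = 2, .region_index = 0 };
 *
 * selects that command at 4 KiB granularity.
 */
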
/*
 * A helper structure which holds information about blocks of a given size
 * which require writing and/or erasing.
 *
 * The actual map of the blocks is pointed at by the 'block_map' field, one
 * byte per block. A block might need an erase, or just a write, depending on
 * the contents of the 'before' and 'after' flash images.
 *
 * The 'limit' field holds the number of blocks of this size which is
 * equivalent to one block of the next larger size in terms of time required
 * for erasing/programming.
 */
struct range_map {
	size_t block_size;
	int limit;
	struct b_map {
		uint8_t need_change:1;
		uint8_t need_erase:1;
	} *block_map;
};

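/*
 * A worked example with made-up numbers: tracking a 16 MiB chip at 4 KiB
 * granularity takes a block_map of 16 MiB / 4 KiB = 4096 one-byte entries.
 * If the next larger block size is 64 KiB, the ~70% rule applied when the
 * maps are initialized (see fill_action_descriptor()) gives
 * limit = (65536 / 4096) * 7 / 10 = 11.
 */
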
/*
 * A debug function printing out the array of processing units from the
 * action descriptor.
 */
static void dump_descriptor(struct action_descriptor *descriptor)
{
	struct processing_unit *pu = descriptor->processing_units;

	while (pu->num_blocks) {
		msg_pdbg("%06zx..%06zx %6zx x %zd eraser %d\n", pu->offset,
			 pu->offset + pu->num_blocks * pu->block_size - 1,
			 pu->block_size, pu->num_blocks,
			 pu->block_eraser_index);
		pu++;
	}
}

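/*
 * For instance, a hypothetical unit covering 16 blocks of 0x1000 bytes at
 * offset 0x10000, using eraser 0, would be printed as:
 *
 *	010000..01ffff   1000 x 16 eraser 0
 */
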
/*
 * Do not allow use of unsupported eraser functions.
 *
 * On some Intel platforms the ICH SPI controller restricts the set of SPI
 * command codes the AP can issue, in particular limiting the set of erase
 * functions to just two of them.
 *
 * This function creates a local copy of the flash chip descriptor found in
 * the main table, filtering out unsupported erase function pointers when
 * necessary.
 *
 * flash: pointer to the master flash context, including the original chip
 *        descriptor.
 * chip:  pointer to a flash chip descriptor copy, potentially with just a
 *        subset of erasers included.
 */
static void fix_erasers_if_needed(struct flashchip *chip,
				  struct flashctx *flash)
{
	int i;

	if (deprecate_ad)
		return;

	/* Need to copy no matter what. */
	*chip = *flash->chip;

#if ((defined (__i386__) || defined (__x86_64__) || defined(__amd64__))) && CONFIG_INTERNAL
	/*
	 * ich_generation is set to the chipset type when running on an x86
	 * device, even when flashrom was invoked to program the EC.
	 *
	 * But ICH type does not affect the EC programming path, so no need
	 * to check if the eraser is supported in that case.
	 */
	if ((ich_generation == CHIPSET_ICH_UNKNOWN) || programming_ec()) {
		msg_pdbg("%s: kept all erasers\n", __func__);
		return;
	}
#else
	msg_pdbg("%s: kept all erasers on non-x86\n", __func__);
	return;
#endif /* !IS_X86 */

	/*
	 * We are dealing with an Intel controller; different chipsets allow
	 * different erase commands. Let's check the commands and allow only
	 * those which the controller accepts.
	 */
	dry_run = true;
	for (i = 0; i < NUM_ERASEFUNCTIONS; i++) {
		erasefunc_t *erase_func = lookup_erase_func_ptr(&chip->block_erasers[i]);
		/* Assume it is not allowed. */
		if (!erase_func)
			continue;

		if (!erase_func(flash, 0, flash->chip->total_size * 1024)) {
			msg_pdbg("%s: kept eraser at %d\n", __func__, i);
			continue;
		}

		chip->block_erasers[i].block_erase = NO_BLOCK_ERASE_FUNC;
	}
	dry_run = false;
}

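/*
 * A sketch of how the dry-run probing above is expected to interact with
 * the erase paths (hypothetical function and helper names, shown only to
 * illustrate the assumed contract): an erase function must perform its
 * controller-level opcode validation before consulting is_dry_run(), and
 * must return without touching the chip while a dry run is in progress:
 *
 *	int spi_block_erase_example(struct flashctx *flash,
 *				    unsigned int addr, unsigned int blocklen)
 *	{
 *		if (!opcode_allowed_by_master(flash))	// hypothetical check
 *			return 1;	// probe fails, eraser filtered out
 *		if (is_dry_run())
 *			return 0;	// probe succeeds, nothing erased
 *		// ... actual erase ...
 *		return 0;
 *	}
 */
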
/*
 * Prepare a list of erasers available on this chip, sorted by block size,
 * from smallest to largest.
 *
 * @flash	    pointer to the flash context
 * @erase_size	    maximum offset which needs to be erased
 * @sorted_erasers  pointer to the array of eraser structures, large enough to
 *                  fit NUM_ERASEFUNCTIONS elements.
 *
 * Returns the number of elements put into the 'sorted_erasers' array.
 */
static size_t fill_sorted_erasers(struct flashctx *flash,
				  size_t erase_size,
				  struct eraser *sorted_erasers)
{
	size_t j, k;
	size_t chip_eraser;
	size_t chip_region;
	struct flashchip chip; /* Local copy, potentially altered. */
	/*
	 * In case the chip description does not include any erase functions
	 * covering the entire space (this could happen when the description
	 * comes from the Chrome OS TP driver, for instance), use best effort.
	 *
	 * The structure below saves information about the eraser which covers
	 * most of the chip space; it is used if no fully covering function
	 * was found, which still allows programming to succeed.
	 *
	 * The issue is to be investigated further under b/110474116.
	 */
	struct {
		int max_total;
		int alt_function;
		int alt_region;
	} fallback = {};

	fix_erasers_if_needed(&chip, flash);

	/* Iterate over all available erase functions/block sizes. */
	for (j = k = 0; k < NUM_ERASEFUNCTIONS; k++) {
		size_t new_block_size;
		size_t m, n;

		/* Make sure there is a function in this slot. */
		if (!chip.block_erasers[k].block_erase)
			continue;

		/*
		 * Make sure there is a (block size * count) combination which
		 * would erase up to the required offset into the chip.
		 *
		 * If this is not the case, but the current total size exceeds
		 * the previously saved fallback total size, make the current
		 * block the best available fallback case.
		 */
		for (n = 0; n < NUM_ERASEREGIONS; n++) {
			const struct eraseblock *eb =
				chip.block_erasers[k].eraseblocks + n;
			size_t total = eb->size * eb->count;

			if (total >= erase_size)
				break;

			if (total > (size_t)fallback.max_total) {
				fallback.max_total = total;
				fallback.alt_region = n;
				fallback.alt_function = k;
			}
		}

		if (n == NUM_ERASEREGIONS) {
			/*
			 * This function will not erase far enough into the
			 * chip.
			 */
			continue;
		}

		new_block_size = chip.block_erasers[k].eraseblocks[n].size;

		/*
		 * Place this block in the sorted position in the
		 * sorted_erasers array.
		 */
		for (m = 0; m < j; m++) {
			size_t old_block_size;

			chip_eraser = sorted_erasers[m].eraser_index;
			chip_region = sorted_erasers[m].region_index;

			old_block_size = chip.block_erasers
				[chip_eraser].eraseblocks[chip_region].size;

			if (old_block_size < new_block_size)
				continue;

			/* Do not keep duplicates in the sorted array. */
			if (old_block_size == new_block_size) {
				j--;
				break;
			}

			memmove(sorted_erasers + m + 1,
				sorted_erasers + m,
				sizeof(sorted_erasers[0]) * (j - m));
			break;
		}
		sorted_erasers[m].eraser_index = k;
		sorted_erasers[m].region_index = n;
		j++;
	}

	if (j) {
		msg_pdbg("%s: found %zd valid erasers\n", __func__, j);
		return j;
	}

	if (!fallback.max_total) {
		msg_cerr("No erasers found for this chip (%s:%s)!\n",
			 chip.vendor, chip.name);
		exit(1);
	}

	sorted_erasers[0].eraser_index = fallback.alt_function;
	sorted_erasers[0].region_index = fallback.alt_region;
	msg_pwarn("%s: using fallback eraser: "
		  "region %d, function %d total %#x vs %#zx\n",
		  __func__, fallback.alt_region, fallback.alt_function,
		  fallback.max_total, erase_size);

	return 1;
}

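/*
 * A worked example with made-up numbers: suppose a chip offers a 64 KiB
 * block erase in block_erasers[0], a 32 KiB one in block_erasers[1] and a
 * 4 KiB one in block_erasers[3], each with a single eraseblocks[0] region
 * covering the whole chip, and erase_size reaches the end of the chip.
 * fill_sorted_erasers() would then return 3 with
 *
 *	sorted_erasers = { {3, 0}, {1, 0}, {0, 0} }
 *
 * i.e. the entries sorted by block size: 4 KiB, 32 KiB, 64 KiB.
 */
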
/*
 * When it is determined that the larger block will have to be erased because
 * a large enough number of the blocks of the previous smaller size need to be
 * erased, all blocks of smaller sizes falling into the range of addresses of
 * this larger block will not have to be erased/written individually, so they
 * need to be unmarked for erase/change.
 *
 * This function recursively invokes itself to clean all smaller size blocks
 * which are in the range of the current larger block.
 *
 * @upper_level_map  pointer to the element of the range map array where the
 *                   current block belongs.
 * @block_index      index of the current block in the map of the blocks of
 *                   the current range map element.
 * @i		     index of this range map in the array of range maps,
 *                   guaranteed to be 1 or above, so that there is always a
 *                   smaller block size range map at i - 1.
 */
static void clear_all_nested(struct range_map *upper_level_map,
			     size_t block_index,
			     unsigned i)
{
	struct range_map *this_level_map = upper_level_map - 1;
	size_t range_start;
	size_t range_end;
	size_t j;

	range_start = upper_level_map->block_size * block_index;
	range_end = range_start + upper_level_map->block_size;

	for (j = range_start / this_level_map->block_size;
	     j < range_end / this_level_map->block_size;
	     j++) {
		this_level_map->block_map[j].need_change = 0;
		this_level_map->block_map[j].need_erase = 0;
		if (i > 1)
			clear_all_nested(this_level_map, j, i - 1);
	}
}

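/*
 * Worked example (hypothetical sizes): with a 4 KiB map at index 0 and a
 * 64 KiB map at index 1, clearing under 64 KiB block_index 2 computes
 * range_start = 0x20000 and range_end = 0x30000, and so resets entries
 * 32..47 of the 4 KiB block map (0x20000 / 0x1000 = 32). With i == 1 the
 * recursion stops there.
 */
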
/*
 * Once all lowest range size blocks which need to be erased have been
 * identified, we need to see if there are so many of them that they may be
 * folded into larger size blocks, so that a single larger erase operation is
 * required instead of many smaller ones.
 *
 * @maps       pointer to the array of range_map structures, sorted by block
 *	       size from lower to higher, only the lowest size block map has
 *	       been filled up.
 * @num_maps   number of elements in the maps array.
 * @chip_size  size of the flash chip, in bytes.
 */
static void fold_range_maps(struct range_map *maps,
			    size_t num_maps,
			    size_t chip_size)
{
	size_t block_index;
	unsigned i;
	struct range_map *map;

	/*
	 * First go from bottom to top, marking higher size blocks which need
	 * to be erased based on the count of lower size blocks marked for
	 * erasing which fall into the range of addresses covered by the
	 * larger size block.
	 *
	 * Starting from the second element of the array, as the first element
	 * is the only one filled up so far.
	 */
	for (i = 1; i < num_maps; i++) {
		int block_mult;

		map = maps + i;

		/* How many lower size blocks fit into this block. */
		block_mult = map->block_size / map[-1].block_size;

		for (block_index = 0;
		     block_index < (chip_size/map->block_size);
		     block_index++) {
			int lower_start;
			int lower_end;
			int lower_index;
			int erase_marked_blocks;
			int change_marked_blocks;

			lower_start = block_index * block_mult;
			lower_end = lower_start + block_mult;
			erase_marked_blocks = 0;
			change_marked_blocks = 0;

			for (lower_index = lower_start;
			     lower_index < lower_end;
			     lower_index++) {

				if (map[-1].block_map[lower_index].need_erase)
					erase_marked_blocks++;

				if (map[-1].block_map[lower_index].need_change)
					change_marked_blocks++;
			}

			/*
			 * Mark larger block for erasing; if any of the
			 * smaller size blocks was marked as 'need_change',
			 * mark the larger size block as well.
			 */
			if (erase_marked_blocks > map[-1].limit) {
				map->block_map[block_index].need_erase = 1;
				map->block_map[block_index].need_change =
					change_marked_blocks ? 1 : 0;
			}
		}
	}

	/*
	 * Now let's go from larger to smaller block sizes, to make sure that
	 * all nested blocks of a bigger block marked for erasing are no
	 * longer marked for erasing themselves; erasing the encompassing
	 * block is sure to erase all nested blocks of all smaller sizes.
	 */
	for (i = num_maps - 1; i > 0; i--) {
		map = maps + i;

		for (block_index = 0;
		     block_index < (chip_size/map->block_size);
		     block_index++) {
			if (!map->block_map[block_index].need_erase)
				continue;

			clear_all_nested(map, block_index, i);
		}
	}
}

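/*
 * To make the two passes above concrete (made-up numbers): with 4 KiB and
 * 64 KiB maps, the limit for the 4 KiB map is (16 * 7) / 10 = 11. If 12 of
 * the 16 4-KiB blocks inside one 64 KiB range are marked need_erase, the
 * first pass marks the 64 KiB block (12 > 11), and the second pass then
 * calls clear_all_nested() to unmark all 16 nested 4 KiB blocks.
 */
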
/*
 * A function to fill the processing_units array of the action descriptor with
 * a set of processing units, which describe flash chip blocks which need to
 * be erased/programmed to accomplish the action requested by the user when
 * invoking flashrom.
 *
 * This set of processing units is determined based on comparing the old and
 * new flash contents.
 *
 * First, blocks which are required to be erased and/or written are identified
 * at the finest block size granularity.
 *
 * Then the distribution of those blocks is analyzed, and if enough of the
 * smaller blocks in a single larger block address range need to be erased,
 * the larger block is marked for erasing.
 *
 * This same process is applied again to increasingly larger block sizes until
 * the largest granularity blocks are marked as appropriate.
 *
 * After this the range map array is scanned from larger block sizes to
 * smaller; each time a larger block marked for erasing is detected, all
 * smaller size blocks in the same address range are unmarked for erasing.
 *
 * In the end only blocks which need to be modified remain marked, and at the
 * finest possible granularity. The list of these blocks is added to the
 * 'processing_units' array of the descriptor and becomes the list of actions
 * to be taken to program the flash chip.
 *
 * @descriptor		descriptor structure to fill, allocated by the caller.
 * @sorted_erasers      pointer to an array of eraser descriptors, sorted by
 *			block size.
 * @chip_erasers	pointer to the array of erasers from this flash
 * 			chip's descriptor.
 * @chip_size		size of this chip in bytes
 * @num_sorted_erasers  size of the sorted_erasers array
 * @erased_value	value contained in all bytes of the erased flash
 */
static void fill_action_descriptor(struct action_descriptor *descriptor,
				   struct eraser *sorted_erasers,
				   struct block_eraser *chip_erasers,
				   size_t chip_size,
				   size_t num_sorted_erasers,
				   unsigned erased_value)
{
	const uint8_t *newc;
	const uint8_t *oldc;
	int consecutive_blocks;
	size_t block_size;
	struct b_map *block_map;
	struct range_map range_maps[num_sorted_erasers];
	unsigned i;
	unsigned pu_index;

	/*
	 * This array has enough room to hold helper structures, one for each
	 * available block size.
	 */
	memset(range_maps, 0, sizeof(range_maps));

	/*
	 * Initialize the range_maps array: allocate space for block_map
	 * arrays on every entry (block maps are used to keep track of blocks
	 * which need to be erased/written) and calculate the limit where
	 * smaller blocks should be replaced by the next larger size block.
	 */
	for (i = 0; i < num_sorted_erasers; i++) {
		size_t larger_block_size;
		size_t map_size;
		size_t num_blocks;
		unsigned function;
		unsigned region;

		function = sorted_erasers[i].eraser_index;
		region = sorted_erasers[i].region_index;
		block_size = chip_erasers[function].eraseblocks[region].size;

		range_maps[i].block_size = block_size;

		/*
		 * Allocate room for the map where blocks which require
		 * writing/erasing will be marked.
		 */
		num_blocks = chip_size/block_size;
		map_size = num_blocks * sizeof(struct b_map);
		range_maps[i].block_map = malloc(map_size);
		if (!range_maps[i].block_map) {
			msg_cerr("%s: Failed to allocate %zd bytes\n",
				 __func__, map_size);
			exit(1);
		}
		memset(range_maps[i].block_map, 0, map_size);

		/*
		 * Limit is calculated for all block sizes but the largest
		 * one, because there is no way to further consolidate the
		 * largest blocks.
		 */
		if (i < (num_sorted_erasers - 1)) {
			function = sorted_erasers[i + 1].eraser_index;
			region = sorted_erasers[i + 1].region_index;
			larger_block_size = chip_erasers
				[function].eraseblocks[region].size;

			/*
			 * How many of the lower size blocks have to be
			 * erased before it is worth moving to the larger
			 * size.
			 *
			 * The admittedly arbitrary rule of thumb here is if
			 * 70% or more of the lower size blocks need to be
			 * erased, forget the lower size blocks and move to
			 * the higher size one.
			 */
			range_maps[i].limit = ((larger_block_size /
						block_size) * 7) / 10;
		}
	}

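	/*
	 * Limit arithmetic on made-up sizes: for a 32 KiB map whose next
	 * larger size is 64 KiB, limit = (65536 / 32768) * 7 / 10 = 1, so
	 * folding happens only when both nested 32 KiB blocks need erasing
	 * (2 > 1).
	 */
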
	/* Cache pointers to the 'before' and 'after' contents. */
	oldc = descriptor->oldcontents;
	newc = descriptor->newcontents;

	/* Now, let's fill up the map for the smallest block size. */
	block_size = range_maps[0].block_size;
	block_map = range_maps[0].block_map;
	for (i = 0; i < chip_size; i++) {
		int block_index;

		if (oldc[i] == newc[i])
			continue;

		block_index = i/block_size;

		if (oldc[i] != erased_value)
			block_map[block_index].need_erase = 1;

		if (newc[i] != erased_value)
			block_map[block_index].need_change = 1;

		if (block_map[block_index].need_erase &&
		    block_map[block_index].need_change) {
			/*
			 * Both bits are already set for this block: skip
			 * ahead to the start of the next block. Note that
			 * the round-up below relies on the block size being
			 * a power of two.
			 */
			i += range_maps[0].block_size;
			i &= ~(range_maps[0].block_size - 1);
			i--; /* Adjust for the increment in the for loop. */
		}
	}

	/* Now let's see what can be folded into larger blocks. */
	fold_range_maps(range_maps, num_sorted_erasers, chip_size);

	/* Finally we can fill the action descriptor. */
	consecutive_blocks = 0;
	pu_index = 0;  /* Number of initialized processing units. */
	for (i = 0; i < num_sorted_erasers; i++) {
		size_t j;
		struct processing_unit *pu;
		size_t map_size = chip_size/range_maps[i].block_size;

		for (j = 0; j < map_size; j++) {

			block_map = range_maps[i].block_map + j;

			if (block_map->need_erase || block_map->need_change) {
				consecutive_blocks++;
				continue;
			}

			if (!consecutive_blocks)
				continue;

			/* Add a programming/erasing unit. */
			pu = descriptor->processing_units + pu_index++;

			pu->block_size = range_maps[i].block_size;
			pu->offset = (j - consecutive_blocks) * pu->block_size;
			pu->num_blocks = consecutive_blocks;
			pu->block_eraser_index = sorted_erasers[i].eraser_index;
			pu->block_region_index = sorted_erasers[i].region_index;

			consecutive_blocks = 0;
		}

		free(range_maps[i].block_map);

		if (!consecutive_blocks)
			continue;

		/*
		 * Add the last programming/erasing unit for the current
		 * block size.
		 */
		pu = descriptor->processing_units + pu_index++;

		pu->block_size = range_maps[i].block_size;
		pu->offset = (j - consecutive_blocks) * pu->block_size;
		pu->num_blocks = consecutive_blocks;
		pu->block_eraser_index = sorted_erasers[i].eraser_index;
		pu->block_region_index = sorted_erasers[i].region_index;
		consecutive_blocks = 0;
	}

	descriptor->processing_units[pu_index].num_blocks = 0;
}

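/*
 * An example of the outcome (hypothetical diff): if only 4 KiB blocks 3..5
 * changed, plus one full 64 KiB block at 0x100000, the array would end up
 * holding two units followed by the terminator, roughly:
 *
 *	{ .offset = 0x3000,   .block_size = 0x1000,  .num_blocks = 3, ... }
 *	{ .offset = 0x100000, .block_size = 0x10000, .num_blocks = 1, ... }
 *	{ .num_blocks = 0 }	// terminator, as checked by dump_descriptor()
 */
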
bool is_dry_run(void)
{
	return dry_run;
}

struct action_descriptor *prepare_action_descriptor(struct flashctx *flash,
						    void *oldcontents,
						    void *newcontents)
{
	struct eraser sorted_erasers[NUM_ERASEFUNCTIONS];
	size_t i;
	size_t num_erasers;
	int max_units;
	size_t block_size = 0;
	struct action_descriptor *descriptor;
	size_t chip_size = flash->chip->total_size * 1024;

	/*
	 * Find the maximum size of the area which might have to be erased;
	 * this is needed to ensure that the picked erase function can go all
	 * the way to the required offset, as some of the erase functions
	 * operate only on part of the chip starting at offset zero.
	 *
	 * Not an efficient way to do it, but this is acceptable on the host.
	 *
	 * Look for the largest offset where the contents differ: this is the
	 * highest offset which might need to be erased.
	 */
	for (i = 0; i < chip_size; i++)
		if (((uint8_t *)newcontents)[i] !=
		    ((uint8_t *)oldcontents)[i])
			block_size = i + 1;

	num_erasers = fill_sorted_erasers(flash, block_size, sorted_erasers);

	/*
	 * Let's allocate enough memory for the worst case action descriptor
	 * size, when we need to program half the chip using the smallest block
	 * size.
	 */
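	/*
	 * Worst-case arithmetic, with made-up numbers: if every other
	 * smallest-size block differs, no folding happens and the descriptor
	 * needs chip_size / (2 * block_size) units. For a 16 MiB chip with
	 * 4 KiB blocks that is 16777216 / 8192 = 2048 units, plus one for
	 * the terminating entry with num_blocks == 0.
	 */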
	block_size = flash->chip->block_erasers
		[sorted_erasers[0].eraser_index].eraseblocks
		[sorted_erasers[0].region_index].size;
	max_units = max(chip_size / (2 * block_size), 1) + 1;
	descriptor = malloc(sizeof(struct action_descriptor) +
			    sizeof(struct processing_unit) * max_units);
	if (!descriptor) {
		msg_cerr("Failed to allocate room for %d processing units!\n",
			 max_units);
		exit(1);
	}

	descriptor->newcontents = newcontents;
	descriptor->oldcontents = oldcontents;

	fill_action_descriptor(descriptor, sorted_erasers,
			       flash->chip->block_erasers, chip_size,
			       num_erasers, ERASED_VALUE(flash));

	dump_descriptor(descriptor);

	return descriptor;
}
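
/*
 * Expected usage, sketched under the assumption that the caller owns the
 * 'before'/'after' buffers and the returned descriptor (the processing_units
 * array is terminated by an entry with num_blocks == 0):
 *
 *	struct action_descriptor *d =
 *		prepare_action_descriptor(flash, oldcontents, newcontents);
 *	struct processing_unit *pu;
 *
 *	for (pu = d->processing_units; pu->num_blocks; pu++) {
 *		// erase pu->num_blocks blocks of pu->block_size bytes at
 *		// pu->offset with block_erasers[pu->block_eraser_index],
 *		// then write the changed bytes back from newcontents
 *	}
 *	free(d);
 */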
711