/**
 * f2fs_format.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * Dual licensed under the GPL or LGPL version 2 licenses.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <f2fs_fs.h>
#include <assert.h>
#include <stdbool.h>

#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#include <time.h>

#ifdef HAVE_UUID_UUID_H
#include <uuid/uuid.h>
#endif
#ifndef HAVE_LIBUUID
#define uuid_parse(a, b) -1
#define uuid_generate(a)
#define uuid_unparse(a, b) -1
#endif

#include "quota.h"
#include "f2fs_format_utils.h"

extern struct f2fs_configuration c;
struct f2fs_super_block raw_sb;
struct f2fs_super_block *sb = &raw_sb;
struct f2fs_checkpoint *cp;

static inline bool device_is_aliased(unsigned int dev_num)
{
	if (dev_num >= c.ndevs)
		return false;
	return c.devices[dev_num].alias_filename != NULL;
}

static inline unsigned int target_device_index(uint64_t blkaddr)
{
	int i;

	for (i = 0; i < c.ndevs; i++)
		if (c.devices[i].start_blkaddr <= blkaddr &&
				c.devices[i].end_blkaddr >= blkaddr)
			return i;
	return 0;
}

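/*
 * Translate between block addresses and main-area segment numbers; both
 * macros are relative to main_blkaddr, so they are only meaningful for
 * blocks inside the main area.
 */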
#define GET_SEGNO(blk_addr) ((blk_addr - get_sb(main_blkaddr)) / \
				c.blks_per_seg)
#define START_BLOCK(segno) (segno * c.blks_per_seg + get_sb(main_blkaddr))

/* Return first segment number of each area */
static inline uint32_t next_zone(int seg_type)
{
	uint32_t next_seg = c.cur_seg[seg_type] + c.segs_per_zone;
	uint64_t next_blkaddr = START_BLOCK(next_seg);
	int dev_num;

	dev_num = target_device_index(next_blkaddr);
	if (!device_is_aliased(dev_num))
		return GET_SEGNO(next_blkaddr);

	while (dev_num < c.ndevs && device_is_aliased(dev_num))
		dev_num++;

	return GET_SEGNO(c.devices[dev_num - 1].end_blkaddr + 1);
}

static inline uint32_t last_zone(uint32_t total_zone)
{
	uint32_t last_seg = (total_zone - 1) * c.segs_per_zone;
	uint64_t last_blkaddr = START_BLOCK(last_seg);
	int dev_num;

	dev_num = target_device_index(last_blkaddr);
	if (!device_is_aliased(dev_num))
		return GET_SEGNO(last_blkaddr);

	while (dev_num > 0 && device_is_aliased(dev_num))
		dev_num--;

	return GET_SEGNO(c.devices[dev_num + 1].start_blkaddr) -
		c.segs_per_zone;
}

#define last_section(cur)	(cur + (c.secs_per_zone - 1) * c.segs_per_sec)

/* Return time fixed by the user or current time by default */
#define mkfs_time ((c.fixed_time == -1) ? time(NULL) : c.fixed_time)

const char *media_ext_lists[] = {
	/* common prefix */
	"mp", // Covers mp3, mp4, mpeg, mpg
	"wm", // Covers wma, wmb, wmv
	"og", // Covers oga, ogg, ogm, ogv
	"jp", // Covers jpg, jpeg, jp2

	/* video */
	"avi",
	"m4v",
	"m4p",
	"mkv",
	"mov",
	"webm",

	/* audio */
	"wav",
	"m4a",
	"3gp",
	"opus",
	"flac",

	/* image */
	"gif",
	"png",
	"svg",
	"webp",

	/* archives */
	"jar",
	"deb",
	"iso",
	"gz",
	"xz",
	"zst",

	/* others */
	"pdf",
	"pyc", // Python bytecode
	"ttc",
	"ttf",
	"exe",

	/* android */
	"apk",
	"cnt", // Image alias
	"exo", // YouTube
	"odex", // Android RunTime
	"vdex", // Android RunTime
	"so",

	NULL
};

const char *hot_ext_lists[] = {
	"db",

#ifndef WITH_ANDROID
	/* Virtual machines */
	"vmdk", // VMware or VirtualBox
	"vdi", // VirtualBox
	"qcow2", // QEMU
#endif
	NULL
};

const char **default_ext_list[] = {
	media_ext_lists,
	hot_ext_lists
};

static bool is_extension_exist(const char *name)
{
	int i;

	for (i = 0; i < F2FS_MAX_EXTENSION; i++) {
		char *ext = (char *)sb->extension_list[i];
		if (!strcmp(ext, name))
			return 1;
	}

	return 0;
}

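/*
 * Build sb->extension_list: the built-in media (cold) list and hot list,
 * each followed by any user-supplied names from c.extension_list[].
 * extension_count covers the cold entries; hot_ext_count counts the hot
 * entries that follow them.
 */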
static void cure_extension_list(void)
{
	const char **extlist;
	char *ext_str;
	char *ue;
	int name_len;
	int i, pos = 0;

	set_sb(extension_count, 0);
	memset(sb->extension_list, 0, sizeof(sb->extension_list));

	for (i = 0; i < 2; i++) {
		ext_str = c.extension_list[i];
		extlist = default_ext_list[i];

		while (*extlist) {
			name_len = strlen(*extlist);
			memcpy(sb->extension_list[pos++], *extlist, name_len);
			extlist++;
		}
		if (i == 0)
			set_sb(extension_count, pos);
		else
			sb->hot_ext_count = pos - get_sb(extension_count);

		if (!ext_str)
			continue;

		/* add user ext list */
		ue = strtok(ext_str, ", ");
		while (ue != NULL) {
			name_len = strlen(ue);
			if (name_len >= F2FS_EXTENSION_LEN) {
				MSG(0, "\tWarn: Extension name (%s) is too long\n", ue);
				goto next;
			}
			if (!is_extension_exist(ue))
				memcpy(sb->extension_list[pos++], ue, name_len);
next:
			ue = strtok(NULL, ", ");
			if (pos >= F2FS_MAX_EXTENSION)
				break;
		}

		if (i == 0)
			set_sb(extension_count, pos);
		else
			sb->hot_ext_count = pos - get_sb(extension_count);

		free(c.extension_list[i]);
	}
}

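/*
 * The active logs must all start in distinct segments; if any duplicate
 * is found, rebuild the assignment sequentially, one zone per log.
 */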
static void verify_cur_segs(void)
{
	int i, j;
	int reorder = 0;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		for (j = i + 1; j < NR_CURSEG_TYPE; j++) {
			if (c.cur_seg[i] == c.cur_seg[j]) {
				reorder = 1;
				break;
			}
		}
	}

	if (!reorder)
		return;

	c.cur_seg[0] = 0;
	for (i = 1; i < NR_CURSEG_TYPE; i++)
		c.cur_seg[i] = next_zone(i - 1);
}

static int f2fs_prepare_super_block(void)
{
	uint32_t blk_size_bytes;
	uint32_t log_sectorsize, log_sectors_per_block;
	uint32_t log_blocksize, log_blks_per_seg;
	uint32_t segment_size_bytes, zone_size_bytes;
	uint32_t alignment_bytes;
	uint32_t sit_segments, nat_segments;
	uint32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
	uint32_t total_valid_blks_available;
	uint64_t zone_align_start_offset, diff;
	uint64_t total_meta_zones, total_meta_segments;
	uint32_t sit_bitmap_size, max_sit_bitmap_size;
	uint32_t max_nat_bitmap_size, max_nat_segments;
	uint32_t total_zones, avail_zones = 0;
	enum quota_type qtype;
	int i;

	set_sb(magic, F2FS_SUPER_MAGIC);
	set_sb(major_ver, F2FS_MAJOR_VERSION);
	set_sb(minor_ver, F2FS_MINOR_VERSION);

	log_sectorsize = log_base_2(c.sector_size);
	log_sectors_per_block = log_base_2(c.sectors_per_blk);
	log_blocksize = log_sectorsize + log_sectors_per_block;
	log_blks_per_seg = log_base_2(c.blks_per_seg);

	set_sb(log_sectorsize, log_sectorsize);
	set_sb(log_sectors_per_block, log_sectors_per_block);

	set_sb(log_blocksize, log_blocksize);
	set_sb(log_blocks_per_seg, log_blks_per_seg);

	set_sb(segs_per_sec, c.segs_per_sec);
	set_sb(secs_per_zone, c.secs_per_zone);

	blk_size_bytes = 1 << log_blocksize;
	segment_size_bytes = blk_size_bytes * c.blks_per_seg;
	zone_size_bytes =
		blk_size_bytes * c.secs_per_zone *
		c.segs_per_sec * c.blks_per_seg;

	set_sb(checksum_offset, 0);

	set_sb(block_count, c.total_sectors >> log_sectors_per_block);

	alignment_bytes = c.zoned_mode && c.ndevs > 1 ? segment_size_bytes : zone_size_bytes;

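	/*
	 * Round the space after the start sector and the two superblock
	 * copies (2 * F2FS_BLKSIZE) up to the alignment unit so that
	 * segment 0 starts on a zone (or, for multi-device zoned mode,
	 * segment) boundary.
	 */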
	zone_align_start_offset =
		((uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE +
		2 * F2FS_BLKSIZE + alignment_bytes - 1) /
		alignment_bytes * alignment_bytes -
		(uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE;

	if (c.feature & F2FS_FEATURE_RO)
		zone_align_start_offset = 8192;

	if (c.start_sector % DEFAULT_SECTORS_PER_BLOCK) {
		MSG(1, "\t%s: Align start sector number to the page unit\n",
				c.zoned_mode ? "FAIL" : "WARN");
		MSG(1, "\ti.e., start sector: %d, ofs:%d (sects/page: %d)\n",
				c.start_sector,
				c.start_sector % DEFAULT_SECTORS_PER_BLOCK,
				DEFAULT_SECTORS_PER_BLOCK);
		if (c.zoned_mode)
			return -1;
	}

	if (c.zoned_mode && c.ndevs > 1)
		zone_align_start_offset +=
			(c.devices[0].total_sectors * c.sector_size -
			 zone_align_start_offset) % zone_size_bytes;

	set_sb(segment0_blkaddr, zone_align_start_offset / blk_size_bytes);
	sb->cp_blkaddr = sb->segment0_blkaddr;

	MSG(0, "Info: zone aligned segment0 blkaddr: %u\n",
					get_sb(segment0_blkaddr));

	if (c.zoned_mode &&
		((c.ndevs == 1 &&
			(get_sb(segment0_blkaddr) + c.start_sector /
			DEFAULT_SECTORS_PER_BLOCK) % c.zone_blocks) ||
		(c.ndevs > 1 &&
			c.devices[1].start_blkaddr % c.zone_blocks))) {
		MSG(1, "\tError: Unaligned segment0 block address %u\n",
				get_sb(segment0_blkaddr));
		return -1;
	}

	for (i = 0; i < c.ndevs; i++) {
		if (i == 0) {
			c.devices[i].total_segments =
				((c.devices[i].total_sectors *
				c.sector_size - zone_align_start_offset) /
				segment_size_bytes) / c.segs_per_zone *
				c.segs_per_zone;
			c.devices[i].start_blkaddr = 0;
			c.devices[i].end_blkaddr = c.devices[i].total_segments *
						c.blks_per_seg - 1 +
						sb->segment0_blkaddr;
		} else {
			c.devices[i].total_segments =
				(c.devices[i].total_sectors /
				(c.sectors_per_blk * c.blks_per_seg)) /
				c.segs_per_zone * c.segs_per_zone;
			c.devices[i].start_blkaddr =
					c.devices[i - 1].end_blkaddr + 1;
			c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
					c.devices[i].total_segments *
					c.blks_per_seg - 1;
			if (device_is_aliased(i)) {
				if (c.devices[i].zoned_model ==
						F2FS_ZONED_HM) {
					MSG(1, "\tError: device aliasing is not "
					"supported for device[%d]\n", i);
					return -1;
				}
				c.aliased_segments +=
					c.devices[i].total_segments;
			}
		}
		if (c.ndevs > 1) {
			strncpy((char *)sb->devs[i].path, c.devices[i].path, MAX_PATH_LEN);
			sb->devs[i].total_segments =
					cpu_to_le32(c.devices[i].total_segments);
		}

		c.total_segments += c.devices[i].total_segments;
	}
	set_sb(segment_count, c.total_segments);
	set_sb(segment_count_ckpt, F2FS_NUMBER_OF_CHECKPOINT_PACK);

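	/*
	 * Metadata layout, in order: checkpoint area, SIT, NAT, SSA, then
	 * the main area. Each *_blkaddr below follows from the size of the
	 * area before it.
	 */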
	set_sb(sit_blkaddr, get_sb(segment0_blkaddr) +
			get_sb(segment_count_ckpt) * c.blks_per_seg);

	blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);

	sit_segments = SEG_ALIGN(blocks_for_sit);

	set_sb(segment_count_sit, sit_segments * 2);

	set_sb(nat_blkaddr, get_sb(sit_blkaddr) + get_sb(segment_count_sit) *
			c.blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit))) * c.blks_per_seg;

	blocks_for_nat = SIZE_ALIGN(total_valid_blks_available,
			NAT_ENTRY_PER_BLOCK);

	if (c.large_nat_bitmap) {
		nat_segments = SEG_ALIGN(blocks_for_nat) *
						DEFAULT_NAT_ENTRY_RATIO / 100;
		set_sb(segment_count_nat, nat_segments ? nat_segments : 1);
		max_nat_bitmap_size = (get_sb(segment_count_nat) <<
						log_blks_per_seg) / 8;
		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
	} else {
		set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));
		max_nat_bitmap_size = 0;
	}

	/*
	 * The number of NAT segments should not exceed a threshold.
	 * This number sizes the NAT bitmap area in a CP page, so the
	 * threshold is chosen so that the bitmap cannot overflow one
	 * CP page.
	 */
	sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
				log_blks_per_seg) / 8;

	if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
		max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
	else
		max_sit_bitmap_size = sit_bitmap_size;

	if (c.large_nat_bitmap) {
		/* use cp_payload if free space of f2fs_checkpoint is not enough */
		if (max_sit_bitmap_size + max_nat_bitmap_size >
						MAX_BITMAP_SIZE_IN_CKPT) {
			uint32_t diff = max_sit_bitmap_size +
						max_nat_bitmap_size -
						MAX_BITMAP_SIZE_IN_CKPT;
			set_sb(cp_payload, F2FS_BLK_ALIGN(diff));
		} else {
			set_sb(cp_payload, 0);
		}
	} else {
		/*
		 * At least one segment must be reserved for the NAT.
		 * When the SIT is too large, we must expand the CP area,
		 * which requires more pages for the CP.
		 */
		if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) {
			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT;
			set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
		} else {
			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT -
							max_sit_bitmap_size;
			set_sb(cp_payload, 0);
		}
		max_nat_segments = (max_nat_bitmap_size * 8) >> log_blks_per_seg;

		if (get_sb(segment_count_nat) > max_nat_segments)
			set_sb(segment_count_nat, max_nat_segments);

		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
	}

	set_sb(ssa_blkaddr, get_sb(nat_blkaddr) + get_sb(segment_count_nat) *
			c.blks_per_seg);

	total_valid_blks_available = (get_sb(segment_count) -
			(get_sb(segment_count_ckpt) +
			get_sb(segment_count_sit) +
			get_sb(segment_count_nat))) *
			c.blks_per_seg;

	if (c.feature & F2FS_FEATURE_RO)
		blocks_for_ssa = 0;
	else
		blocks_for_ssa = total_valid_blks_available /
				c.blks_per_seg + 1;

	set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));

	total_meta_segments = get_sb(segment_count_ckpt) +
		get_sb(segment_count_sit) +
		get_sb(segment_count_nat) +
		get_sb(segment_count_ssa);
	diff = total_meta_segments % (c.segs_per_zone);
	if (diff)
		set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
			(c.segs_per_zone - diff));

	total_meta_zones = ZONE_ALIGN(total_meta_segments *
						c.blks_per_seg);

	set_sb(main_blkaddr, get_sb(segment0_blkaddr) + total_meta_zones *
				c.segs_per_zone * c.blks_per_seg);

	if (c.zoned_mode) {
		/*
		 * Make sure there is enough randomly writeable
		 * space at the beginning of the disk.
		 */
		unsigned long main_blkzone = get_sb(main_blkaddr) / c.zone_blocks;

		if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
				c.devices[0].nr_rnd_zones < main_blkzone) {
			MSG(0, "\tError: Device does not have enough random "
					"write zones for F2FS volume (%lu needed)\n",
					main_blkzone);
			return -1;
		}
		/*
		 * Check that the conventional device has enough space to
		 * accommodate all metadata; a zoned device must not
		 * overlap the metadata area.
		 */
		for (i = 1; i < c.ndevs; i++) {
			if (c.devices[i].zoned_model != F2FS_ZONED_NONE &&
				c.devices[i].start_blkaddr < get_sb(main_blkaddr)) {
				MSG(0, "\tError: Conventional device %s is too small,"
					" (%"PRIu64" MiB needed).\n", c.devices[0].path,
					(get_sb(main_blkaddr) -
					c.devices[i].start_blkaddr) >> 8);
				return -1;
			}
		}
	}

	total_zones = get_sb(segment_count) / (c.segs_per_zone) -
							total_meta_zones;
	if (total_zones == 0)
		goto too_small;
	set_sb(section_count, total_zones * c.secs_per_zone);

	set_sb(segment_count_main, get_sb(section_count) * c.segs_per_sec);

	/*
	 * Let's determine the best reserved and overprovisioned space.
	 * For a zoned device whose zone capacity is less than its zone
	 * size, the segments after the zone capacity are unusable in each
	 * zone. So derive the overprovision ratio and the reserved segment
	 * count from the average usable segs_per_sec.
	 */
	if (c.overprovision == 0)
		c.overprovision = get_best_overprovision(sb);

	c.reserved_segments = get_reserved(sb, c.overprovision);

	if (c.feature & F2FS_FEATURE_RO) {
		c.overprovision = 0;
		c.reserved_segments = 0;
	}
	if ((!(c.feature & F2FS_FEATURE_RO) &&
		c.overprovision == 0) ||
		c.total_segments < F2FS_MIN_SEGMENTS ||
		(c.devices[0].total_sectors *
			c.sector_size < zone_align_start_offset) ||
		(get_sb(segment_count_main) - NR_CURSEG_TYPE) <
						c.reserved_segments) {
		goto too_small;
	}

	if (c.vol_uuid) {
		if (uuid_parse(c.vol_uuid, sb->uuid)) {
			MSG(0, "\tError: supplied string is not a valid UUID\n");
			return -1;
		}
	} else {
		uuid_generate(sb->uuid);
	}

	/* precompute checksum seed for metadata */
	if (c.feature & F2FS_FEATURE_INODE_CHKSUM)
		c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));

	utf8_to_utf16((char *)sb->volume_name, (const char *)c.vol_label,
				MAX_VOLUME_NAME, strlen(c.vol_label));
	set_sb(node_ino, 1);
	set_sb(meta_ino, 2);
	set_sb(root_ino, 3);
	c.next_free_nid = 4;

	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
		if (!((1 << qtype) & c.quota_bits))
			continue;
		sb->qf_ino[qtype] = cpu_to_le32(c.next_free_nid++);
		MSG(0, "Info: add quota type = %u => %u\n",
					qtype, c.next_free_nid - 1);
	}

	if (c.feature & F2FS_FEATURE_LOST_FOUND)
		c.lpf_ino = c.next_free_nid++;

	if (c.aliased_devices) {
		c.first_alias_ino = c.next_free_nid;
		c.next_free_nid += c.aliased_devices;
		avail_zones += c.aliased_segments / c.segs_per_zone;
	}

	if (c.feature & F2FS_FEATURE_RO)
		avail_zones += 2;
	else
		avail_zones += 6;

	if (total_zones <= avail_zones) {
		MSG(1, "\tError: %d zones: Need more zones "
			"by shrinking zone size\n", total_zones);
		return -1;
	}

	if (c.feature & F2FS_FEATURE_RO) {
		c.cur_seg[CURSEG_HOT_NODE] = last_section(last_zone(total_zones));
		c.cur_seg[CURSEG_WARM_NODE] = 0;
		c.cur_seg[CURSEG_COLD_NODE] = 0;
		c.cur_seg[CURSEG_HOT_DATA] = 0;
		c.cur_seg[CURSEG_COLD_DATA] = 0;
		c.cur_seg[CURSEG_WARM_DATA] = 0;
	} else if (c.zoned_mode) {
		c.cur_seg[CURSEG_HOT_NODE] = 0;
		if (c.zoned_model == F2FS_ZONED_HM) {
			uint32_t conv_zones =
				c.devices[0].total_segments / c.segs_per_zone
				- total_meta_zones;

			if (total_zones - conv_zones >= avail_zones)
				c.cur_seg[CURSEG_HOT_NODE] =
					(c.devices[1].start_blkaddr -
					 get_sb(main_blkaddr)) / c.blks_per_seg;
		}
		c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
		c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
		c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
		c.cur_seg[CURSEG_WARM_DATA] = next_zone(CURSEG_HOT_DATA);
		c.cur_seg[CURSEG_COLD_DATA] = next_zone(CURSEG_WARM_DATA);
	} else {
		c.cur_seg[CURSEG_HOT_NODE] = 0;
		c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
		c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
		c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
		c.cur_seg[CURSEG_COLD_DATA] =
				max(last_zone((total_zones >> 2)),
					next_zone(CURSEG_HOT_DATA));
		c.cur_seg[CURSEG_WARM_DATA] =
				max(last_zone((total_zones >> 1)),
					next_zone(CURSEG_COLD_DATA));
	}

	/* if there is redundancy, reassign it */
	if (!(c.feature & F2FS_FEATURE_RO))
		verify_cur_segs();

	cure_extension_list();

	/* get kernel version */
	if (c.kd >= 0) {
		dev_read_version(c.version, 0, VERSION_LEN);
		get_kernel_version(c.version);
	} else {
		get_kernel_uname_version(c.version);
	}
	MSG(0, "Info: format version with\n  \"%s\"\n", c.version);

	memcpy(sb->version, c.version, VERSION_LEN);
	memcpy(sb->init_version, c.version, VERSION_LEN);

	if (c.feature & F2FS_FEATURE_CASEFOLD) {
		set_sb(s_encoding, c.s_encoding);
		set_sb(s_encoding_flags, c.s_encoding_flags);
	}

	sb->feature = cpu_to_le32(c.feature);

	if (c.feature & F2FS_FEATURE_SB_CHKSUM) {
		set_sb(checksum_offset, SB_CHKSUM_OFFSET);
		set_sb(crc, f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
						SB_CHKSUM_OFFSET));
		MSG(1, "Info: SB CRC is set: offset (%d), crc (0x%x)\n",
					get_sb(checksum_offset), get_sb(crc));
	}

	return 0;

too_small:
	MSG(0, "\tError: Device size is not sufficient for F2FS volume\n");
	return -1;
}

static int f2fs_init_sit_area(void)
{
	uint32_t blk_size, seg_size;
	uint32_t index = 0;
	uint64_t sit_seg_addr = 0;
	uint8_t *zero_buf = NULL;

	blk_size = 1 << get_sb(log_blocksize);
	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;

	zero_buf = calloc(sizeof(uint8_t), seg_size);
	if (zero_buf == NULL) {
		MSG(1, "\tError: Calloc Failed for sit_zero_buf!!!\n");
		return -1;
	}

	sit_seg_addr = get_sb(sit_blkaddr);
	sit_seg_addr *= blk_size;

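	/*
	 * segment_count_sit counts both SIT copies; zeroing the first
	 * half of the area clears the entire first copy.
	 */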
	DBG(1, "\tFilling sit area at offset 0x%08"PRIx64"\n", sit_seg_addr);
	for (index = 0; index < (get_sb(segment_count_sit) / 2); index++) {
		if (dev_fill(zero_buf, sit_seg_addr, seg_size, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While zeroing out the sit area "
					"on disk!!!\n");
			free(zero_buf);
			return -1;
		}
		sit_seg_addr += seg_size;
	}

	free(zero_buf);
	return 0;
}

static int f2fs_init_nat_area(void)
{
	uint32_t blk_size, seg_size;
	uint32_t index = 0;
	uint64_t nat_seg_addr = 0;
	uint8_t *nat_buf = NULL;

	blk_size = 1 << get_sb(log_blocksize);
	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;

	nat_buf = calloc(sizeof(uint8_t), seg_size);
	if (nat_buf == NULL) {
		MSG(1, "\tError: Calloc Failed for nat_zero_blk!!!\n");
		return -1;
	}

	nat_seg_addr = get_sb(nat_blkaddr);
	nat_seg_addr *= blk_size;

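	/*
	 * Unlike the SIT, the two NAT copies interleave segment by segment,
	 * hence the 2 * seg_size stride: only the set-0 segment of each
	 * pair is zeroed here.
	 */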
	DBG(1, "\tFilling nat area at offset 0x%08"PRIx64"\n", nat_seg_addr);
	for (index = 0; index < get_sb(segment_count_nat) / 2; index++) {
		if (dev_fill(nat_buf, nat_seg_addr, seg_size, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While zeroing out the nat area "
					"on disk!!!\n");
			free(nat_buf);
			return -1;
		}
		nat_seg_addr = nat_seg_addr + (2 * seg_size);
	}

	free(nat_buf);
	return 0;
}

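/*
 * Build and write both checkpoint packs. Pack 1 gets a valid (odd,
 * random) version plus the compact data summaries and node summaries;
 * pack 2 is written with version zero so the kernel selects pack 1 on
 * the first mount.
 */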
static int f2fs_write_check_point_pack(void)
{
	struct f2fs_summary_block *sum;
	struct f2fs_journal *journal;
	uint32_t blk_size_bytes;
	uint32_t nat_bits_bytes, nat_bits_blocks;
	unsigned char *nat_bits = NULL, *empty_nat_bits;
	uint64_t cp_seg_blk = 0;
	uint32_t crc = 0, flags;
	unsigned int i;
	char *cp_payload = NULL;
	char *sum_compact, *sum_compact_p;
	struct f2fs_summary *sum_entry;
	unsigned short vblocks;
	uint32_t used_segments = c.aliased_segments;
	int ret = -1;

	cp = calloc(F2FS_BLKSIZE, 1);
	if (cp == NULL) {
		MSG(1, "\tError: Calloc failed for f2fs_checkpoint!!!\n");
		return ret;
	}

	sum = calloc(F2FS_BLKSIZE, 1);
	if (sum == NULL) {
		MSG(1, "\tError: Calloc failed for summary_node!!!\n");
		goto free_cp;
	}

	sum_compact = calloc(F2FS_BLKSIZE, 1);
	if (sum_compact == NULL) {
		MSG(1, "\tError: Calloc failed for summary buffer!!!\n");
		goto free_sum;
	}
	sum_compact_p = sum_compact;

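	/*
	 * Room for the NAT full/empty bitmaps (nat_bits_bytes each) plus an
	 * 8-byte checksum, rounded up to whole blocks. The buffer is written
	 * at the tail of the checkpoint segment when CP_NAT_BITS_FLAG fits.
	 */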
	nat_bits_bytes = get_sb(segment_count_nat) << 5;
	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
						F2FS_BLKSIZE - 1);
	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	if (nat_bits == NULL) {
		MSG(1, "\tError: Calloc failed for nat bits buffer!!!\n");
		goto free_sum_compact;
	}

	cp_payload = calloc(F2FS_BLKSIZE, 1);
	if (cp_payload == NULL) {
		MSG(1, "\tError: Calloc failed for cp_payload!!!\n");
		goto free_nat_bits;
	}

	/* 1. cp page 1 of checkpoint pack 1 */
	srand((c.fake_seed) ? 0 : time(NULL));
	cp->checkpoint_ver = cpu_to_le64(rand() | 0x1);
	set_cp(cur_node_segno[0], c.cur_seg[CURSEG_HOT_NODE]);
	set_cp(cur_node_segno[1], c.cur_seg[CURSEG_WARM_NODE]);
	set_cp(cur_node_segno[2], c.cur_seg[CURSEG_COLD_NODE]);
	set_cp(cur_data_segno[0], c.cur_seg[CURSEG_HOT_DATA]);
	set_cp(cur_data_segno[1], c.cur_seg[CURSEG_WARM_DATA]);
	set_cp(cur_data_segno[2], c.cur_seg[CURSEG_COLD_DATA]);
	for (i = 3; i < MAX_ACTIVE_NODE_LOGS; i++) {
		set_cp(cur_node_segno[i], 0xffffffff);
		set_cp(cur_data_segno[i], 0xffffffff);
	}

	set_cp(cur_node_blkoff[0], c.curseg_offset[CURSEG_HOT_NODE]);
	set_cp(cur_node_blkoff[2], c.curseg_offset[CURSEG_COLD_NODE]);
	set_cp(cur_data_blkoff[0], c.curseg_offset[CURSEG_HOT_DATA]);
	set_cp(cur_data_blkoff[2], c.curseg_offset[CURSEG_COLD_DATA]);
	set_cp(valid_block_count, c.curseg_offset[CURSEG_HOT_NODE] +
			c.curseg_offset[CURSEG_HOT_DATA] +
			c.curseg_offset[CURSEG_COLD_NODE] +
			c.curseg_offset[CURSEG_COLD_DATA] +
			c.aliased_segments * c.blks_per_seg);
	set_cp(rsvd_segment_count, c.reserved_segments);

	/*
	 * For zoned devices whose zone capacity is less than the zone size,
	 * derive the overprovision segment count from the usable segments
	 * in the device.
	 */
	set_cp(overprov_segment_count, (f2fs_get_usable_segments(sb) -
			get_cp(rsvd_segment_count)) *
			c.overprovision / 100);

	/*
	 * If conf_reserved_sections has a non-zero value, rsvd_segment_count
	 * is added on top of overprov_segment_count.
	 */
	if (c.conf_reserved_sections) {
		/*
		 * Overprovision segments must be bigger than two sections.
		 * Without configurable reserved sections, overprovision
		 * segments are always bigger than two sections.
		 */
		if (get_cp(overprov_segment_count) <
					overprovision_segment_buffer(sb)) {
			MSG(0, "\tError: Not enough overprovision segments (%u)\n",
			    get_cp(overprov_segment_count));
			goto free_cp_payload;
		}
		set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
				get_cp(rsvd_segment_count));
	} else {
		/*
		 * overprov_segment_count must be bigger than
		 * rsvd_segment_count.
		 */
		set_cp(overprov_segment_count, max(get_cp(rsvd_segment_count),
			get_cp(overprov_segment_count)) + overprovision_segment_buffer(sb));
	}

	if (f2fs_get_usable_segments(sb) <= get_cp(overprov_segment_count)) {
		MSG(0, "\tError: Not enough segments to create F2FS Volume\n");
		goto free_cp_payload;
	}
	MSG(0, "Info: Overprovision ratio = %.3lf%%\n", c.overprovision);
	MSG(0, "Info: Overprovision segments = %u (GC reserved = %u)\n",
					get_cp(overprov_segment_count),
					c.reserved_segments);

	/* main segments - reserved segments - (node + data segments) */
	if (c.feature & F2FS_FEATURE_RO)
		used_segments += 2;
	else
		used_segments += 6;

	set_cp(user_block_count, (f2fs_get_usable_segments(sb) -
			get_cp(overprov_segment_count)) * c.blks_per_seg);
	set_cp(free_segment_count, f2fs_get_usable_segments(sb) -
			used_segments);

	/* cp page (2), data summaries (1), node summaries (3) */
	set_cp(cp_pack_total_block_count, 6 + get_sb(cp_payload));
	flags = CP_UMOUNT_FLAG | CP_COMPACT_SUM_FLAG;
	if (get_cp(cp_pack_total_block_count) <=
			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
		flags |= CP_NAT_BITS_FLAG;

	if (c.trimmed)
		flags |= CP_TRIMMED_FLAG;

	if (c.large_nat_bitmap)
		flags |= CP_LARGE_NAT_BITMAP_FLAG;

	set_cp(ckpt_flags, flags);
	set_cp(cp_pack_start_sum, 1 + get_sb(cp_payload));
	set_cp(valid_node_count, c.curseg_offset[CURSEG_HOT_NODE] +
			c.curseg_offset[CURSEG_COLD_NODE]);
	set_cp(valid_inode_count, c.curseg_offset[CURSEG_HOT_NODE] +
			c.curseg_offset[CURSEG_COLD_NODE]);
	set_cp(next_free_nid, c.next_free_nid);
	set_cp(sit_ver_bitmap_bytesize, ((get_sb(segment_count_sit) / 2) <<
			get_sb(log_blocks_per_seg)) / 8);

	set_cp(nat_ver_bitmap_bytesize, ((get_sb(segment_count_nat) / 2) <<
			get_sb(log_blocks_per_seg)) / 8);

	if (c.large_nat_bitmap)
		set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
	else
		set_cp(checksum_offset, CP_CHKSUM_OFFSET);

	crc = f2fs_checkpoint_chksum(cp);
	*((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
							cpu_to_le32(crc);

	blk_size_bytes = 1 << get_sb(log_blocksize);

	if (blk_size_bytes != F2FS_BLKSIZE) {
		MSG(1, "\tError: Wrong block size %d / %d!!!\n",
					blk_size_bytes, F2FS_BLKSIZE);
		goto free_cp_payload;
	}

	cp_seg_blk = get_sb(segment0_blkaddr);

	DBG(1, "\tWriting main segments, cp at offset 0x%08"PRIx64"\n",
						cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	for (i = 0; i < get_sb(cp_payload); i++) {
		cp_seg_blk++;
		if (dev_fill_block(cp_payload, cp_seg_blk, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While zeroing out the sit bitmap area "
					"on disk!!!\n");
			goto free_cp_payload;
		}
	}

	/*
	 * Prepare and write the segment summary for HOT/WARM/COLD DATA.
	 *
	 * The layout of the compact summary block:
	 * +-------------------+
	 * | nat_journal       |
	 * +-------------------+
	 * | sit_journal       |
	 * +-------------------+
	 * | hot data summary  |
	 * +-------------------+
	 * | warm data summary |
	 * +-------------------+
	 * | cold data summary |
	 * +-------------------+
	 */

	/* nat journal */
	journal = &c.nat_jnl;
	memcpy(sum_compact_p, &journal->n_nats, SUM_JOURNAL_SIZE);
	sum_compact_p += SUM_JOURNAL_SIZE;

	/* sit journal */
	journal = &c.sit_jnl;

	if (c.feature & F2FS_FEATURE_RO) {
		i = CURSEG_RO_HOT_DATA;
		vblocks = le16_to_cpu(journal->sit_j.entries[i].se.vblocks);
		journal->sit_j.entries[i].segno = cp->cur_data_segno[0];
		journal->sit_j.entries[i].se.vblocks =
				cpu_to_le16(vblocks | (CURSEG_HOT_DATA << 10));

		i = CURSEG_RO_HOT_NODE;
		vblocks = le16_to_cpu(journal->sit_j.entries[i].se.vblocks);
		journal->sit_j.entries[i].segno = cp->cur_node_segno[0];
		journal->sit_j.entries[i].se.vblocks |=
				cpu_to_le16(vblocks | (CURSEG_HOT_NODE << 10));

		journal->n_sits = cpu_to_le16(2);
	} else {
		for (i = CURSEG_HOT_DATA; i < NR_CURSEG_TYPE; i++) {
			if (i < NR_CURSEG_DATA_TYPE)
				journal->sit_j.entries[i].segno =
					cp->cur_data_segno[i];
			else
				journal->sit_j.entries[i].segno =
					cp->cur_node_segno[i - NR_CURSEG_DATA_TYPE];

			vblocks =
				le16_to_cpu(journal->sit_j.entries[i].se.vblocks);
			journal->sit_j.entries[i].se.vblocks =
						cpu_to_le16(vblocks | (i << 10));
		}

		journal->n_sits = cpu_to_le16(6);
	}

	memcpy(sum_compact_p, &journal->n_sits, SUM_JOURNAL_SIZE);
	sum_compact_p += SUM_JOURNAL_SIZE;

	/* hot data summary */
	memset(sum, 0, F2FS_BLKSIZE);
	SET_SUM_TYPE(sum, SUM_TYPE_DATA);

	sum_entry = (struct f2fs_summary *)sum_compact_p;
	memcpy(sum_entry, c.sum[CURSEG_HOT_DATA],
			sizeof(struct f2fs_summary) * MAX_CACHE_SUMS);

	/* warm data summary, nothing to do */
	/* cold data summary, nothing to do */

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for HOT/WARM/COLD_DATA, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum_compact, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* Prepare and write Segment summary for HOT_NODE */
	memset(sum, 0, F2FS_BLKSIZE);
	SET_SUM_TYPE(sum, SUM_TYPE_NODE);
	memcpy(sum->entries, c.sum[CURSEG_HOT_NODE],
			sizeof(struct f2fs_summary) * MAX_CACHE_SUMS);

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for HOT_NODE, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* Fill segment summary for WARM_NODE to zero. */
	memset(sum, 0, F2FS_BLKSIZE);
	SET_SUM_TYPE(sum, SUM_TYPE_NODE);

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for WARM_NODE, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* Prepare and write Segment summary for COLD_NODE */
	memset(sum, 0, F2FS_BLKSIZE);
	SET_SUM_TYPE(sum, SUM_TYPE_NODE);
	memcpy(sum->entries, c.sum[CURSEG_COLD_NODE],
			sizeof(struct f2fs_summary) * MAX_CACHE_SUMS);

	cp_seg_blk++;
	DBG(1, "\tWriting Segment summary for COLD_NODE, at offset 0x%08"PRIx64"\n",
			cp_seg_blk);
	if (dev_write_block(sum, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
		goto free_cp_payload;
	}

	/* cp page2 */
	cp_seg_blk++;
	DBG(1, "\tWriting cp page2, at offset 0x%08"PRIx64"\n", cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	/* write NAT bits, if possible */
	if (flags & CP_NAT_BITS_FLAG) {
		uint32_t i;

		*(__le64 *)nat_bits = get_cp_crc(cp);
		empty_nat_bits = nat_bits + 8 + nat_bits_bytes;
		memset(empty_nat_bits, 0xff, nat_bits_bytes);
		test_and_clear_bit_le(0, empty_nat_bits);

		/* write the last blocks in cp pack */
		cp_seg_blk = get_sb(segment0_blkaddr) + (1 <<
				get_sb(log_blocks_per_seg)) - nat_bits_blocks;

		DBG(1, "\tWriting NAT bits pages, at offset 0x%08"PRIx64"\n",
					cp_seg_blk);

		for (i = 0; i < nat_bits_blocks; i++) {
			if (dev_write_block(nat_bits + i *
						F2FS_BLKSIZE, cp_seg_blk + i,
						WRITE_LIFE_NONE)) {
				MSG(1, "\tError: write NAT bits to disk!!!\n");
				goto free_cp_payload;
			}
		}
	}

	/*
	 * cp page 1 of checkpoint pack 2:
	 * initialize the other checkpoint pack with version zero.
	 */
	cp->checkpoint_ver = 0;

	crc = f2fs_checkpoint_chksum(cp);
	*((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
							cpu_to_le32(crc);
	cp_seg_blk = get_sb(segment0_blkaddr) + c.blks_per_seg;
	DBG(1, "\tWriting cp page 1 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
				cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	for (i = 0; i < get_sb(cp_payload); i++) {
		cp_seg_blk++;
		if (dev_fill_block(cp_payload, cp_seg_blk, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While zeroing out the sit bitmap area "
					"on disk!!!\n");
			goto free_cp_payload;
		}
	}

	/* cp page 2 of checkpoint pack 2 */
	cp_seg_blk += (le32_to_cpu(cp->cp_pack_total_block_count) -
					get_sb(cp_payload) - 1);
	DBG(1, "\tWriting cp page 2 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
				cp_seg_blk);
	if (dev_write_block(cp, cp_seg_blk, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the cp to disk!!!\n");
		goto free_cp_payload;
	}

	ret = 0;

free_cp_payload:
	free(cp_payload);
free_nat_bits:
	free(nat_bits);
free_sum_compact:
	free(sum_compact);
free_sum:
	free(sum);
free_cp:
	free(cp);
	return ret;
}

static int f2fs_write_super_block(void)
{
	int index;
	uint8_t *zero_buff;

	zero_buff = calloc(F2FS_BLKSIZE, 1);
	if (zero_buff == NULL) {
		MSG(1, "\tError: Calloc Failed for super_blk_zero_buf!!!\n");
		return -1;
	}

	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
	DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
	for (index = 0; index < 2; index++) {
		if (dev_write_block(zero_buff, index, WRITE_LIFE_NONE)) {
			MSG(1, "\tError: While writing super_blk "
					"on disk!!! index : %d\n", index);
			free(zero_buff);
			return -1;
		}
	}

	free(zero_buff);
	return 0;
}

#ifndef WITH_ANDROID
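/*
 * Zero the stale warm-node chain on disk so roll-forward recovery cannot
 * follow leftover next_blkaddr pointers after a power cut on the freshly
 * formatted volume.
 */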
static int f2fs_discard_obsolete_dnode(void)
{
	struct f2fs_node *raw_node;
	uint64_t next_blkaddr = 0, offset;
	u64 end_blkaddr = (get_sb(segment_count_main) <<
			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
	uint64_t start_inode_pos = get_sb(main_blkaddr);
	uint64_t last_inode_pos;

	if (c.zoned_mode || c.feature & F2FS_FEATURE_RO)
		return 0;

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for discard_raw_node!!!\n");
		return -1;
	}

	/* avoid power-off-recovery based on roll-forward policy */
	offset = get_sb(main_blkaddr);
	offset += c.cur_seg[CURSEG_WARM_NODE] * c.blks_per_seg;

	last_inode_pos = start_inode_pos +
		c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg +
		c.curseg_offset[CURSEG_COLD_NODE] - 1;

	do {
		if (offset < get_sb(main_blkaddr) || offset >= end_blkaddr)
			break;

		if (dev_read_block(raw_node, offset)) {
			MSG(1, "\tError: While traversing direct node!!!\n");
			free(raw_node);
			return -1;
		}

		next_blkaddr = le32_to_cpu(F2FS_NODE_FOOTER(raw_node)->next_blkaddr);
		memset(raw_node, 0, F2FS_BLKSIZE);

		DBG(1, "\tDiscard dnode, at offset 0x%08"PRIx64"\n", offset);
		if (dev_write_block(raw_node, offset,
				    f2fs_io_type_to_rw_hint(CURSEG_WARM_NODE))) {
			MSG(1, "\tError: While discarding direct node!!!\n");
			free(raw_node);
			return -1;
		}
		offset = next_blkaddr;
		/* should avoid recursive chain due to stale data */
		if (offset >= start_inode_pos || offset <= last_inode_pos)
			break;
	} while (1);

	free(raw_node);
	return 0;
}
#endif

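/*
 * Hand out the next free block of the given log; blocks are allocated
 * sequentially from the start of the log's current segment, tracked in
 * c.curseg_offset[].
 */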
static block_t alloc_next_free_block(int curseg_type)
{
	block_t blkaddr;

	blkaddr = get_sb(main_blkaddr) +
			c.cur_seg[curseg_type] * c.blks_per_seg +
			c.curseg_offset[curseg_type];

	c.curseg_offset[curseg_type]++;

	return blkaddr;
}

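/*
 * Mark the most recently allocated block of a log as valid in the cached
 * SIT journal and bump the entry's valid-block count. RO images collapse
 * all data logs into one hot-data entry and all node logs into one
 * hot-node entry.
 */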
void update_sit_journal(int curseg_type)
{
	struct f2fs_journal *sit_jnl = &c.sit_jnl;
	unsigned short vblocks;
	int idx = curseg_type;

	if (c.feature & F2FS_FEATURE_RO) {
		if (curseg_type < NR_CURSEG_DATA_TYPE)
			idx = CURSEG_RO_HOT_DATA;
		else
			idx = CURSEG_RO_HOT_NODE;
	}

	f2fs_set_bit(c.curseg_offset[curseg_type] - 1,
		(char *)sit_jnl->sit_j.entries[idx].se.valid_map);

	vblocks = le16_to_cpu(sit_jnl->sit_j.entries[idx].se.vblocks);
	sit_jnl->sit_j.entries[idx].se.vblocks = cpu_to_le16(vblocks + 1);
}

void update_nat_journal(nid_t nid, block_t blkaddr)
{
	struct f2fs_journal *nat_jnl = &c.nat_jnl;
	unsigned short n_nats = le16_to_cpu(nat_jnl->n_nats);

	nat_jnl->nat_j.entries[n_nats].nid = cpu_to_le32(nid);
	nat_jnl->nat_j.entries[n_nats].ne.version = 0;
	nat_jnl->nat_j.entries[n_nats].ne.ino = cpu_to_le32(nid);
	nat_jnl->nat_j.entries[n_nats].ne.block_addr = cpu_to_le32(blkaddr);
	nat_jnl->n_nats = cpu_to_le16(n_nats + 1);
}

void update_summary_entry(int curseg_type, nid_t nid,
					unsigned short ofs_in_node)
{
	struct f2fs_summary *sum;
	unsigned int curofs = c.curseg_offset[curseg_type] - 1;

	assert(curofs < MAX_CACHE_SUMS);

	sum = c.sum[curseg_type] + curofs;
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
}

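/*
 * Append one entry to a dentry block: names longer than F2FS_SLOT_LEN
 * span multiple consecutive slots, and every slot used is marked in the
 * block's dentry bitmap while *didx advances past it.
 */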
static void add_dentry(struct f2fs_dentry_block *dent_blk, unsigned int *didx,
		const char *name, uint32_t ino, u8 type)
{
	int len = strlen(name);
	f2fs_hash_t hash;

	if (name[0] == '.' && (len == 1 || (len == 2 && name[1] == '.')))
		hash = 0;
	else
		hash = f2fs_dentry_hash(0, 0, (unsigned char *)name, len);

	F2FS_DENTRY_BLOCK_DENTRY(dent_blk, *didx).hash_code = cpu_to_le32(hash);
	F2FS_DENTRY_BLOCK_DENTRY(dent_blk, *didx).ino = cpu_to_le32(ino);
	F2FS_DENTRY_BLOCK_DENTRY(dent_blk, *didx).name_len = cpu_to_le16(len);
	F2FS_DENTRY_BLOCK_DENTRY(dent_blk, *didx).file_type = type;

	while (len > F2FS_SLOT_LEN) {
		memcpy(F2FS_DENTRY_BLOCK_FILENAME(dent_blk, *didx), name,
				F2FS_SLOT_LEN);
		test_and_set_bit_le(*didx, dent_blk->dentry_bitmap);
		len -= (int)F2FS_SLOT_LEN;
		name += F2FS_SLOT_LEN;
		(*didx)++;
	}
	memcpy(F2FS_DENTRY_BLOCK_FILENAME(dent_blk, *didx), name, len);
	test_and_set_bit_le(*didx, dent_blk->dentry_bitmap);
	(*didx)++;
}

static block_t f2fs_add_default_dentry_root(void)
{
	struct f2fs_dentry_block *dent_blk = NULL;
	block_t data_blkaddr;
	unsigned int didx = 0;

	dent_blk = calloc(F2FS_BLKSIZE, 1);
	if (dent_blk == NULL) {
		MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
		return 0;
	}

	add_dentry(dent_blk, &didx, ".",
			le32_to_cpu(sb->root_ino), F2FS_FT_DIR);
	add_dentry(dent_blk, &didx, "..",
			le32_to_cpu(sb->root_ino), F2FS_FT_DIR);

	if (c.lpf_ino)
		add_dentry(dent_blk, &didx, LPF, c.lpf_ino, F2FS_FT_DIR);

	if (c.aliased_devices) {
		int i, dev_off = 0;

		for (i = 1; i < c.ndevs; i++) {
			if (!device_is_aliased(i))
				continue;

			add_dentry(dent_blk, &didx, c.devices[i].alias_filename,
					c.first_alias_ino + dev_off,
					F2FS_FT_REG_FILE);
			dev_off++;
		}
	}

	data_blkaddr = alloc_next_free_block(CURSEG_HOT_DATA);

	DBG(1, "\tWriting default dentry root, at offset 0x%x\n", data_blkaddr);
	if (dev_write_block(dent_blk, data_blkaddr,
			    f2fs_io_type_to_rw_hint(CURSEG_HOT_DATA))) {
		MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
		free(dent_blk);
		return 0;
	}

	update_sit_journal(CURSEG_HOT_DATA);
	update_summary_entry(CURSEG_HOT_DATA, le32_to_cpu(sb->root_ino), 0);

	free(dent_blk);
	return data_blkaddr;
}

static int f2fs_write_root_inode(void)
{
	struct f2fs_node *raw_node = NULL;
	block_t data_blkaddr;
	block_t node_blkaddr;

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
		return -1;
	}

	f2fs_init_inode(sb, raw_node, le32_to_cpu(sb->root_ino),
						mkfs_time, 0x41ed);

	if (c.lpf_ino)
		raw_node->i.i_links = cpu_to_le32(3);

	data_blkaddr = f2fs_add_default_dentry_root();
	if (data_blkaddr == 0) {
		MSG(1, "\tError: Failed to add default dentries for root!!!\n");
		free(raw_node);
		return -1;
	}

	raw_node->i.i_addr[get_extra_isize(raw_node)] =
				cpu_to_le32(data_blkaddr);

	node_blkaddr = alloc_next_free_block(CURSEG_HOT_NODE);
	F2FS_NODE_FOOTER(raw_node)->next_blkaddr = cpu_to_le32(node_blkaddr + 1);

	DBG(1, "\tWriting root inode (hot node), offset 0x%x\n", node_blkaddr);
	if (write_inode(raw_node, node_blkaddr,
			f2fs_io_type_to_rw_hint(CURSEG_HOT_NODE)) < 0) {
		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
		free(raw_node);
		return -1;
	}

	update_nat_journal(le32_to_cpu(sb->root_ino), node_blkaddr);
	update_sit_journal(CURSEG_HOT_NODE);
	update_summary_entry(CURSEG_HOT_NODE, le32_to_cpu(sb->root_ino), 0);

	free(raw_node);
	return 0;
}

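/*
 * Write a minimal VFSV1 (v2r1) quota file image: header and info block,
 * followed by 1KB quota-tree blocks that chain down to a single data
 * block holding one dquot, which charges the blocks and inodes created
 * by mkfs. Returns the first data block address, or 0 on failure.
 */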
static int f2fs_write_default_quota(int qtype, __le32 raw_id)
{
	char *filebuf = calloc(F2FS_BLKSIZE, 2);
	int file_magics[] = INITQMAGICS;
	struct v2_disk_dqheader ddqheader;
	struct v2_disk_dqinfo ddqinfo;
	struct v2r1_disk_dqblk dqblk;
	block_t blkaddr;
	uint64_t icnt = 1, bcnt = 1;
	int i;

	if (filebuf == NULL) {
		MSG(1, "\tError: Calloc Failed for filebuf!!!\n");
		return 0;
	}

	/* Write basic quota header */
	ddqheader.dqh_magic = cpu_to_le32(file_magics[qtype]);
	/* only support QF_VFSV1 */
	ddqheader.dqh_version = cpu_to_le32(1);

	memcpy(filebuf, &ddqheader, sizeof(ddqheader));

	/* Fill initial quota file content */
	ddqinfo.dqi_bgrace = cpu_to_le32(MAX_DQ_TIME);
	ddqinfo.dqi_igrace = cpu_to_le32(MAX_IQ_TIME);
	ddqinfo.dqi_flags = cpu_to_le32(0);
	ddqinfo.dqi_blocks = cpu_to_le32(QT_TREEOFF + 5);
	ddqinfo.dqi_free_blk = cpu_to_le32(0);
	ddqinfo.dqi_free_entry = cpu_to_le32(5);

	memcpy(filebuf + V2_DQINFOOFF, &ddqinfo, sizeof(ddqinfo));

	filebuf[1024] = 2;
	filebuf[2048] = 3;
	filebuf[3072] = 4;
	filebuf[4096] = 5;

	filebuf[5120 + 8] = 1;

	dqblk.dqb_id = raw_id;
	dqblk.dqb_pad = cpu_to_le32(0);
	dqblk.dqb_ihardlimit = cpu_to_le64(0);
	dqblk.dqb_isoftlimit = cpu_to_le64(0);
	if (c.lpf_ino) {
		icnt++;
		bcnt++;
	}
	if (c.aliased_devices) {
		icnt += c.aliased_devices;
		bcnt += c.aliased_segments * c.blks_per_seg;
	}
	dqblk.dqb_curinodes = cpu_to_le64(icnt);
	dqblk.dqb_bhardlimit = cpu_to_le64(0);
	dqblk.dqb_bsoftlimit = cpu_to_le64(0);
	dqblk.dqb_curspace = cpu_to_le64(F2FS_BLKSIZE * bcnt);
	dqblk.dqb_btime = cpu_to_le64(0);
	dqblk.dqb_itime = cpu_to_le64(0);

	memcpy(filebuf + 5136, &dqblk, sizeof(struct v2r1_disk_dqblk));

	/* Write quota blocks */
	for (i = 0; i < QUOTA_DATA; i++) {
		blkaddr = alloc_next_free_block(CURSEG_HOT_DATA);

		if (dev_write_block(filebuf + i * F2FS_BLKSIZE, blkaddr,
				    f2fs_io_type_to_rw_hint(CURSEG_HOT_DATA))) {
			MSG(1, "\tError: While writing the quota_blk to disk!!!\n");
			free(filebuf);
			return 0;
		}

		update_sit_journal(CURSEG_HOT_DATA);
		update_summary_entry(CURSEG_HOT_DATA,
					le32_to_cpu(sb->qf_ino[qtype]), i);
		DBG(1, "\tWriting quota data, at offset %08x (%d/%d)\n",
						blkaddr, i + 1, QUOTA_DATA);
	}

	free(filebuf);
	return blkaddr + 1 - QUOTA_DATA;
}

static int f2fs_write_qf_inode(int qtype)
{
	struct f2fs_node *raw_node = NULL;
	block_t data_blkaddr;
	block_t node_blkaddr;
	__le32 raw_id;
	int i;

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
		return -1;
	}
	f2fs_init_inode(sb, raw_node,
			le32_to_cpu(sb->qf_ino[qtype]), mkfs_time, 0x8180);

	raw_node->i.i_size = cpu_to_le64(1024 * 6);
	raw_node->i.i_blocks = cpu_to_le64(1 + QUOTA_DATA);
	raw_node->i.i_flags = cpu_to_le32(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);

	node_blkaddr = alloc_next_free_block(CURSEG_HOT_NODE);
	F2FS_NODE_FOOTER(raw_node)->next_blkaddr = cpu_to_le32(node_blkaddr + 1);

	if (qtype == 0)
		raw_id = raw_node->i.i_uid;
	else if (qtype == 1)
		raw_id = raw_node->i.i_gid;
	else if (qtype == 2)
		raw_id = raw_node->i.i_projid;
	else
		ASSERT(0);

	/* write quota blocks */
	data_blkaddr = f2fs_write_default_quota(qtype, raw_id);
	if (data_blkaddr == 0) {
		free(raw_node);
		return -1;
	}

	for (i = 0; i < QUOTA_DATA; i++)
		raw_node->i.i_addr[get_extra_isize(raw_node) + i] =
					cpu_to_le32(data_blkaddr + i);

	DBG(1, "\tWriting quota inode (hot node), offset 0x%x\n", node_blkaddr);
	if (write_inode(raw_node, node_blkaddr,
			f2fs_io_type_to_rw_hint(CURSEG_HOT_NODE)) < 0) {
		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
		free(raw_node);
		return -1;
	}

	update_nat_journal(le32_to_cpu(sb->qf_ino[qtype]), node_blkaddr);
	update_sit_journal(CURSEG_HOT_NODE);
	update_summary_entry(CURSEG_HOT_NODE, le32_to_cpu(sb->qf_ino[qtype]), 0);

	free(raw_node);
	return 0;
}

static int f2fs_update_nat_default(void)
{
	struct f2fs_nat_block *nat_blk = NULL;
	uint64_t nat_seg_blk_offset = 0;

	nat_blk = calloc(F2FS_BLKSIZE, 1);
	if (nat_blk == NULL) {
		MSG(1, "\tError: Calloc Failed for nat_blk!!!\n");
		return -1;
	}

	/* update node nat */
	nat_blk->entries[get_sb(node_ino)].block_addr = cpu_to_le32(1);
	nat_blk->entries[get_sb(node_ino)].ino = sb->node_ino;

	/* update meta nat */
	nat_blk->entries[get_sb(meta_ino)].block_addr = cpu_to_le32(1);
	nat_blk->entries[get_sb(meta_ino)].ino = sb->meta_ino;

	nat_seg_blk_offset = get_sb(nat_blkaddr);

	DBG(1, "\tWriting nat root, at offset 0x%08"PRIx64"\n",
					nat_seg_blk_offset);
	if (dev_write_block(nat_blk, nat_seg_blk_offset, WRITE_LIFE_NONE)) {
		MSG(1, "\tError: While writing the nat_blk set0 to disk!\n");
		free(nat_blk);
		return -1;
	}

	free(nat_blk);
	return 0;
}

static block_t f2fs_add_default_dentry_lpf(void)
{
	struct f2fs_dentry_block *dent_blk;
	block_t data_blkaddr;
	unsigned int didx = 0;

	dent_blk = calloc(F2FS_BLKSIZE, 1);
	if (dent_blk == NULL) {
		MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
		return 0;
	}

	add_dentry(dent_blk, &didx, ".", c.lpf_ino, F2FS_FT_DIR);
	add_dentry(dent_blk, &didx, "..", c.lpf_ino, F2FS_FT_DIR);

	data_blkaddr = alloc_next_free_block(CURSEG_HOT_DATA);

	DBG(1, "\tWriting default dentry lost+found, at offset 0x%x\n",
							data_blkaddr);
	if (dev_write_block(dent_blk, data_blkaddr,
			    f2fs_io_type_to_rw_hint(CURSEG_HOT_DATA))) {
		MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
		free(dent_blk);
		return 0;
	}

	update_sit_journal(CURSEG_HOT_DATA);
	update_summary_entry(CURSEG_HOT_DATA, c.lpf_ino, 0);

	free(dent_blk);
	return data_blkaddr;
}

static int f2fs_write_lpf_inode(void)
{
	struct f2fs_node *raw_node;
	block_t data_blkaddr;
	block_t node_blkaddr;
	int err = 0;

	ASSERT(c.lpf_ino);

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
		return -1;
	}

	f2fs_init_inode(sb, raw_node, c.lpf_ino, mkfs_time, 0x41c0);

	raw_node->i.i_pino = sb->root_ino;
	raw_node->i.i_namelen = cpu_to_le32(strlen(LPF));
	memcpy(raw_node->i.i_name, LPF, strlen(LPF));

	node_blkaddr = alloc_next_free_block(CURSEG_HOT_NODE);
	F2FS_NODE_FOOTER(raw_node)->next_blkaddr = cpu_to_le32(node_blkaddr + 1);

	data_blkaddr = f2fs_add_default_dentry_lpf();
	if (data_blkaddr == 0) {
		MSG(1, "\tError: Failed to add default dentries for lost+found!!!\n");
		err = -1;
		goto exit;
	}
	raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blkaddr);

	DBG(1, "\tWriting lost+found inode (hot node), offset 0x%x\n",
								node_blkaddr);
	if (write_inode(raw_node, node_blkaddr,
			f2fs_io_type_to_rw_hint(CURSEG_HOT_NODE)) < 0) {
		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
		err = -1;
		goto exit;
	}

	update_nat_journal(c.lpf_ino, node_blkaddr);
	update_sit_journal(CURSEG_HOT_NODE);
	update_summary_entry(CURSEG_HOT_NODE, c.lpf_ino, 0);

exit:
	free(raw_node);
	return err;
}

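/*
 * Mark every main-area segment backed by the aliased device as fully
 * valid cold data directly in the on-disk SIT, then cover the whole
 * range with a single extent in the alias inode.
 */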
static void allocate_blocks_for_aliased_device(struct f2fs_node *raw_node,
		unsigned int dev_num)
{
	uint32_t start_segno = (c.devices[dev_num].start_blkaddr -
			get_sb(main_blkaddr)) / c.blks_per_seg;
	uint32_t end_segno = (c.devices[dev_num].end_blkaddr -
			get_sb(main_blkaddr) + 1) / c.blks_per_seg;
	uint32_t segno;
	uint64_t blkcnt;
	struct f2fs_sit_block *sit_blk = calloc(F2FS_BLKSIZE, 1);

	ASSERT(sit_blk);

	for (segno = start_segno; segno < end_segno; segno++) {
		struct f2fs_sit_entry *sit;
		uint64_t sit_blk_addr = get_sb(sit_blkaddr) +
			(segno / SIT_ENTRY_PER_BLOCK);

		ASSERT(dev_read_block(sit_blk, sit_blk_addr) >= 0);
		sit = &sit_blk->entries[segno % SIT_ENTRY_PER_BLOCK];
		memset(&sit->valid_map, 0xFF, SIT_VBLOCK_MAP_SIZE);
		sit->vblocks = cpu_to_le16((CURSEG_COLD_DATA <<
					SIT_VBLOCKS_SHIFT) | c.blks_per_seg);
		sit->mtime = cpu_to_le64(mkfs_time);
		ASSERT(dev_write_block(sit_blk, sit_blk_addr,
			f2fs_io_type_to_rw_hint(CURSEG_COLD_DATA)) >= 0);
	}

	blkcnt = (end_segno - start_segno) * c.blks_per_seg;
	raw_node->i.i_size = cpu_to_le64(blkcnt << get_sb(log_blocksize));
	raw_node->i.i_blocks = cpu_to_le64(blkcnt + 1);

	raw_node->i.i_ext.fofs = cpu_to_le32(0);
	raw_node->i.i_ext.blk_addr =
		cpu_to_le32(c.devices[dev_num].start_blkaddr);
	raw_node->i.i_ext.len = cpu_to_le32(blkcnt);

	free(sit_blk);
}

static int f2fs_write_alias_inodes(void)
{
	struct f2fs_node *raw_node;
	block_t node_blkaddr;
	int err = 0;
	unsigned int i, dev_off = 0;

	ASSERT(c.aliased_devices);

	raw_node = calloc(F2FS_BLKSIZE, 1);
	if (raw_node == NULL) {
		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
		return -1;
	}

	for (i = 1; i < c.ndevs; i++) {
		const char *filename;
		nid_t ino;

		if (!device_is_aliased(i))
			continue;

		ino = c.first_alias_ino + dev_off;
		dev_off++;
		f2fs_init_inode(sb, raw_node, ino, mkfs_time, 0x81c0);

		raw_node->i.i_flags = cpu_to_le32(F2FS_DEVICE_ALIAS_FL);
		raw_node->i.i_inline = F2FS_PIN_FILE;
		raw_node->i.i_pino = sb->root_ino;
		filename = c.devices[i].alias_filename;
		raw_node->i.i_namelen = cpu_to_le32(strlen(filename));
		memcpy(raw_node->i.i_name, filename, strlen(filename));

		node_blkaddr = alloc_next_free_block(CURSEG_COLD_NODE);
		F2FS_NODE_FOOTER(raw_node)->next_blkaddr =
			cpu_to_le32(node_blkaddr + 1);

		allocate_blocks_for_aliased_device(raw_node, i);

		DBG(1, "\tWriting aliased device inode (cold node), "
				"offset 0x%x\n", node_blkaddr);
		if (write_inode(raw_node, node_blkaddr,
			    f2fs_io_type_to_rw_hint(CURSEG_COLD_NODE)) < 0) {
			MSG(1, "\tError: While writing the raw_node to "
					"disk!!!\n");
			err = -1;
			goto exit;
		}

		update_nat_journal(ino, node_blkaddr);
		update_sit_journal(CURSEG_COLD_NODE);
		update_summary_entry(CURSEG_COLD_NODE, ino, 0);
	}

exit:
	free(raw_node);
	return err;
}

static int f2fs_create_root_dir(void)
{
	enum quota_type qtype;
	int err = 0;

	err = f2fs_write_root_inode();
	if (err < 0) {
		MSG(1, "\tError: Failed to write root inode!!!\n");
		goto exit;
	}

	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
		if (!((1 << qtype) & c.quota_bits))
			continue;
		err = f2fs_write_qf_inode(qtype);
		if (err < 0) {
			MSG(1, "\tError: Failed to write quota inode!!!\n");
			goto exit;
		}
	}

	if (c.feature & F2FS_FEATURE_LOST_FOUND) {
		err = f2fs_write_lpf_inode();
		if (err < 0) {
			MSG(1, "\tError: Failed to write lost+found inode!!!\n");
			goto exit;
		}
	}

	if (c.aliased_devices) {
		err = f2fs_write_alias_inodes();
		if (err < 0) {
			MSG(1, "\tError: Failed to write aliased device "
				"inodes!!!\n");
			goto exit;
		}
	}

#ifndef WITH_ANDROID
	err = f2fs_discard_obsolete_dnode();
	if (err < 0) {
		MSG(1, "\tError: Failed to discard obsolete dnode!!!\n");
		goto exit;
	}
#endif

	err = f2fs_update_nat_default();
	if (err < 0) {
		MSG(1, "\tError: Failed to update NAT for root!!!\n");
		goto exit;
	}
exit:
	if (err)
		MSG(1, "\tError: Could not create the root directory!!!\n");

	return err;
}

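/*
 * Top-level format sequence: prepare the superblock in memory, optionally
 * trim the devices, zero the SIT/NAT areas, create the default inodes
 * (root, quota files, lost+found, device aliases), then commit the
 * checkpoint packs and finally the superblock copies.
 */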
int f2fs_format_device(void)
{
	int err = 0;

	err = f2fs_prepare_super_block();
	if (err < 0) {
		MSG(0, "\tError: Failed to prepare a super block!!!\n");
		goto exit;
	}

	if (c.trim) {
		err = f2fs_trim_devices();
		if (err < 0) {
			MSG(0, "\tError: Failed to trim whole device!!!\n");
			goto exit;
		}
	}

	err = f2fs_init_sit_area();
	if (err < 0) {
		MSG(0, "\tError: Failed to initialise the SIT AREA!!!\n");
		goto exit;
	}

	err = f2fs_init_nat_area();
	if (err < 0) {
		MSG(0, "\tError: Failed to initialise the NAT AREA!!!\n");
		goto exit;
	}

	err = f2fs_create_root_dir();
	if (err < 0) {
		MSG(0, "\tError: Failed to create the root directory!!!\n");
		goto exit;
	}

	err = f2fs_write_check_point_pack();
	if (err < 0) {
		MSG(0, "\tError: Failed to write the check point pack!!!\n");
		goto exit;
	}

	err = f2fs_write_super_block();
	if (err < 0) {
		MSG(0, "\tError: Failed to write the super block!!!\n");
		goto exit;
	}
exit:
	if (err)
		MSG(0, "\tError: Could not format the device!!!\n");

	return err;
}