/**
 * resize.c
 *
 * Copyright (c) 2015 Jaegeuk Kim <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"

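/*
 * Compute the superblock geometry for the resized volume (a summary of the
 * logic below): derive the new block/segment counts from c.target_sectors,
 * then re-lay out the metadata areas (SIT, NAT, SSA) ahead of the main area.
 * A safe resize keeps the existing metadata layout and only recomputes the
 * main-area counts.
 */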
static int get_new_sb(struct f2fs_super_block *sb)
{
        uint32_t zone_size_bytes;
        uint64_t zone_align_start_offset;
        uint32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
        uint32_t sit_segments, nat_segments, diff, total_meta_segments;
        uint32_t total_valid_blks_available;
        uint32_t sit_bitmap_size, max_sit_bitmap_size;
        uint32_t max_nat_bitmap_size, max_nat_segments;
        uint32_t segment_size_bytes = 1 << (get_sb(log_blocksize) +
                                        get_sb(log_blocks_per_seg));
        uint32_t blks_per_seg = 1 << get_sb(log_blocks_per_seg);
        uint32_t segs_per_zone = get_sb(segs_per_sec) * get_sb(secs_per_zone);

        set_sb(block_count, c.target_sectors >>
                                get_sb(log_sectors_per_block));

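        /*
         * Align the first zone: round the area reserved for the two
         * superblocks (2 * F2FS_BLKSIZE) up to a zone boundary, measured
         * from c.start_sector.
         */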
        zone_size_bytes = segment_size_bytes * segs_per_zone;
        zone_align_start_offset =
                ((uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE +
                2 * F2FS_BLKSIZE + zone_size_bytes - 1) /
                zone_size_bytes * zone_size_bytes -
                (uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE;

        set_sb(segment_count, (c.target_sectors * c.sector_size -
                        zone_align_start_offset) / segment_size_bytes /
                        c.segs_per_sec * c.segs_per_sec);

        if (c.safe_resize)
                goto safe_resize;

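        /*
         * Full resize: re-lay out the metadata in on-disk order
         * CP | SIT | NAT | SSA | MAIN, sizing each area from the new
         * segment count.
         */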
        blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);
        sit_segments = SEG_ALIGN(blocks_for_sit);
        set_sb(segment_count_sit, sit_segments * 2);
        set_sb(nat_blkaddr, get_sb(sit_blkaddr) +
                        get_sb(segment_count_sit) * blks_per_seg);

        total_valid_blks_available = (get_sb(segment_count) -
                        (get_sb(segment_count_ckpt) +
                        get_sb(segment_count_sit))) * blks_per_seg;
        blocks_for_nat = SIZE_ALIGN(total_valid_blks_available,
                        NAT_ENTRY_PER_BLOCK);

        if (c.large_nat_bitmap) {
                nat_segments = SEG_ALIGN(blocks_for_nat) *
                        DEFAULT_NAT_ENTRY_RATIO / 100;
                set_sb(segment_count_nat, nat_segments ? nat_segments : 1);

                max_nat_bitmap_size = (get_sb(segment_count_nat) <<
                                get_sb(log_blocks_per_seg)) / 8;
                set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
        } else {
                set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));
                max_nat_bitmap_size = 0;
        }

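        /*
         * The SIT/NAT version bitmaps live in the checkpoint block; whatever
         * exceeds MAX_BITMAP_SIZE_IN_CKPT must spill into cp_payload blocks.
         */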
        sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
                        get_sb(log_blocks_per_seg)) / 8;
        if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
                max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
        else
                max_sit_bitmap_size = sit_bitmap_size;

        if (c.large_nat_bitmap) {
                /* use cp_payload if the checkpoint block cannot hold both bitmaps */
                if (max_sit_bitmap_size + max_nat_bitmap_size >
                                                MAX_BITMAP_SIZE_IN_CKPT) {
                        uint32_t diff = max_sit_bitmap_size +
                                        max_nat_bitmap_size -
                                        MAX_BITMAP_SIZE_IN_CKPT;
                        set_sb(cp_payload, F2FS_BLK_ALIGN(diff));
                } else {
                        set_sb(cp_payload, 0);
                }
        } else {
                /*
                 * At least one segment must be reserved for the NAT bitmap.
                 * When the SIT bitmap is too large, expand the checkpoint
                 * area instead, which costs extra blocks per checkpoint pack.
                 */
                if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) {
                        max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT;
                        set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
                } else {
                        max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT -
                                        max_sit_bitmap_size;
                        set_sb(cp_payload, 0);
                }

                max_nat_segments = (max_nat_bitmap_size * 8) >>
                                        get_sb(log_blocks_per_seg);

                if (get_sb(segment_count_nat) > max_nat_segments)
                        set_sb(segment_count_nat, max_nat_segments);

                set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
        }

        set_sb(ssa_blkaddr, get_sb(nat_blkaddr) +
                        get_sb(segment_count_nat) * blks_per_seg);

        total_valid_blks_available = (get_sb(segment_count) -
                        (get_sb(segment_count_ckpt) +
                        get_sb(segment_count_sit) +
                        get_sb(segment_count_nat))) * blks_per_seg;

        blocks_for_ssa = total_valid_blks_available / blks_per_seg + 1;

        set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));

        total_meta_segments = get_sb(segment_count_ckpt) +
                        get_sb(segment_count_sit) +
                        get_sb(segment_count_nat) +
                        get_sb(segment_count_ssa);

        diff = total_meta_segments % segs_per_zone;
        if (diff)
                set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
                        (segs_per_zone - diff));

        set_sb(main_blkaddr, get_sb(ssa_blkaddr) + get_sb(segment_count_ssa) *
                        blks_per_seg);

safe_resize:
        set_sb(segment_count_main, get_sb(segment_count) -
                        (get_sb(segment_count_ckpt) +
                        get_sb(segment_count_sit) +
                        get_sb(segment_count_nat) +
                        get_sb(segment_count_ssa)));

        set_sb(section_count, get_sb(segment_count_main) /
                                get_sb(segs_per_sec));

        set_sb(segment_count_main, get_sb(section_count) *
                                get_sb(segs_per_sec));

        /* Let's determine the best reserved and overprovisioned space */
        if (c.new_overprovision == 0)
                c.new_overprovision = get_best_overprovision(sb);

        c.new_reserved_segments =
                (100 / c.new_overprovision + 1 + NR_CURSEG_TYPE) *
                get_sb(segs_per_sec);

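        /*
         * Sanity check: the main area must keep at least two segments beyond
         * the reserve, and may not claim more blocks than the device holds.
         */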
        if ((get_sb(segment_count_main) - 2) < c.new_reserved_segments ||
                get_sb(segment_count_main) * blks_per_seg >
                                                get_sb(block_count)) {
                MSG(0, "\tError: Device size is not sufficient for F2FS volume, "
                        "more segments needed = %u\n",
                        c.new_reserved_segments -
                        (get_sb(segment_count_main) - 2));
                return -1;
        }
        return 0;
}

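/*
 * Copy every valid block of the main area forward by `offset` blocks.
 * Segments are walked from the end backwards so a destination block is never
 * written before its own contents have been moved, and each block's owner
 * (its parent node for data, the NAT entry for node blocks) is repointed
 * through the summary entry.
 */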
static void migrate_main(struct f2fs_sb_info *sbi, unsigned int offset)
{
        void *raw = calloc(F2FS_BLKSIZE, 1);
        struct seg_entry *se;
        block_t from, to;
        int i, j, ret;
        struct f2fs_summary sum;

        ASSERT(raw != NULL);

        for (i = MAIN_SEGS(sbi) - 1; i >= 0; i--) {
                se = get_seg_entry(sbi, i);
                if (!se->valid_blocks)
                        continue;

                for (j = sbi->blocks_per_seg - 1; j >= 0; j--) {
                        if (!f2fs_test_bit(j, (const char *)se->cur_valid_map))
                                continue;

                        from = START_BLOCK(sbi, i) + j;
                        ret = dev_read_block(raw, from);
                        ASSERT(ret >= 0);

                        to = from + offset;
                        ret = dev_write_block(raw, to,
                                        f2fs_io_type_to_rw_hint(se->type));
                        ASSERT(ret >= 0);

                        get_sum_entry(sbi, from, &sum);

                        if (IS_DATASEG(se->type))
                                update_data_blkaddr(sbi, le32_to_cpu(sum.nid),
                                        le16_to_cpu(sum.ofs_in_node), to, NULL);
                        else
                                update_nat_blkaddr(sbi, 0,
                                                le32_to_cpu(sum.nid), to);
                }
        }
        free(raw);
        DBG(0, "Info: Finished migrating Main area: main_blkaddr = 0x%x -> 0x%x\n",
                                START_BLOCK(sbi, 0),
                                START_BLOCK(sbi, 0) + offset);
}

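/*
 * Write the summary block of segment `segno` to new_sum_blk_addr.  The
 * buffer returned by get_sum_block() is freed only for the types it
 * allocates per call; current-segment summaries are owned by the curseg
 * cache and must not be freed here.
 */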
static void move_ssa(struct f2fs_sb_info *sbi, unsigned int segno,
                                        block_t new_sum_blk_addr)
{
        struct f2fs_summary_block *sum_blk;
        int type;

        sum_blk = get_sum_block(sbi, segno, &type);
        if (type < SEG_TYPE_MAX) {
                int ret;

                ret = dev_write_block(sum_blk, new_sum_blk_addr,
                                WRITE_LIFE_NONE);
                ASSERT(ret >= 0);
                DBG(1, "Write summary block: (%d) segno=%x/%x --> (%d) %x\n",
                                type, segno, GET_SUM_BLKADDR(sbi, segno),
                                IS_SUM_NODE_SEG(sum_blk),
                                new_sum_blk_addr);
        }
        if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
                        type == SEG_TYPE_MAX) {
                free(sum_blk);
        }
        DBG(1, "Info: Finished moving SSA block\n");
}

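/*
 * Relocate the whole SSA area for the new layout.  When the new area starts
 * below the shifted old one, copy from low to high addresses; otherwise copy
 * from high to low, so no summary block is overwritten before it has been
 * moved.  Slots for not-yet-existing segments are zero-filled.
 */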
static void migrate_ssa(struct f2fs_sb_info *sbi,
                struct f2fs_super_block *new_sb, unsigned int offset)
{
        struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
        block_t old_sum_blkaddr = get_sb(ssa_blkaddr);
        block_t new_sum_blkaddr = get_newsb(ssa_blkaddr);
        block_t end_sum_blkaddr = get_newsb(main_blkaddr);
        block_t expand_sum_blkaddr = new_sum_blkaddr +
                                        MAIN_SEGS(sbi) - offset;
        block_t blkaddr;
        int ret;
        void *zero_block = calloc(F2FS_BLKSIZE, 1);
        ASSERT(zero_block);

        if (offset && new_sum_blkaddr < old_sum_blkaddr + offset) {
                /* forward copy: the new area begins before the shifted old one */
                blkaddr = new_sum_blkaddr;
                while (blkaddr < end_sum_blkaddr) {
                        if (blkaddr < expand_sum_blkaddr) {
                                move_ssa(sbi, offset++, blkaddr++);
                        } else {
                                ret = dev_write_block(zero_block, blkaddr++,
                                                WRITE_LIFE_NONE);
                                ASSERT(ret >= 0);
                        }
                }
        } else {
                /* backward copy: start from the last summary block */
                blkaddr = end_sum_blkaddr - 1;
                offset = MAIN_SEGS(sbi) - 1;
                while (blkaddr >= new_sum_blkaddr) {
                        if (blkaddr >= expand_sum_blkaddr) {
                                ret = dev_write_block(zero_block, blkaddr--,
                                                WRITE_LIFE_NONE);
                                ASSERT(ret >= 0);
                        } else {
                                move_ssa(sbi, offset--, blkaddr--);
                        }
                }
        }

        DBG(0, "Info: Finished migrating SSA blocks: sum_blkaddr = 0x%x -> 0x%x\n",
                                old_sum_blkaddr, new_sum_blkaddr);
        free(zero_block);
}

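/*
 * Check whether the NAT can shrink to the new layout: every NAT block that
 * covers nids beyond the new maximum must be completely zero.  On success
 * nm_i->max_nid is lowered; otherwise -1 is returned and nothing changes.
 */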
static int shrink_nats(struct f2fs_sb_info *sbi,
                                struct f2fs_super_block *new_sb)
{
        struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        block_t old_nat_blkaddr = get_sb(nat_blkaddr);
        unsigned int nat_blocks;
        void *nat_block, *zero_block;
        int nid, ret, new_max_nid;
        pgoff_t block_off;
        pgoff_t block_addr;
        int seg_off;

        nat_block = malloc(F2FS_BLKSIZE);
        ASSERT(nat_block);
        zero_block = calloc(F2FS_BLKSIZE, 1);
        ASSERT(zero_block);

        nat_blocks = get_newsb(segment_count_nat) >> 1;
        nat_blocks = nat_blocks << get_sb(log_blocks_per_seg);
        new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

        for (nid = nm_i->max_nid - 1; nid > new_max_nid; nid -= NAT_ENTRY_PER_BLOCK) {
                block_off = nid / NAT_ENTRY_PER_BLOCK;
                seg_off = block_off >> sbi->log_blocks_per_seg;
                block_addr = (pgoff_t)(old_nat_blkaddr +
                        (seg_off << sbi->log_blocks_per_seg << 1) +
                        (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

                if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
                        block_addr += sbi->blocks_per_seg;

                ret = dev_read_block(nat_block, block_addr);
                ASSERT(ret >= 0);

                if (memcmp(zero_block, nat_block, F2FS_BLKSIZE)) {
                        ret = -1;
                        goto not_avail;
                }
        }
        ret = 0;
        nm_i->max_nid = new_max_nid;
not_avail:
        free(nat_block);
        free(zero_block);
        return ret;
}

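/*
 * Copy the live copy of every NAT block into set #0 of the new NAT area and
 * zero-fill the blocks that cover newly added nids.
 */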
static void migrate_nat(struct f2fs_sb_info *sbi,
                                struct f2fs_super_block *new_sb)
{
        struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        block_t old_nat_blkaddr = get_sb(nat_blkaddr);
        block_t new_nat_blkaddr = get_newsb(nat_blkaddr);
        unsigned int nat_blocks;
        void *nat_block;
        int nid, ret, new_max_nid;
        pgoff_t block_off;
        pgoff_t block_addr;
        int seg_off;

        nat_block = malloc(F2FS_BLKSIZE);
        ASSERT(nat_block);

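        /*
         * The NAT keeps two copies of each block, laid out as pairs of
         * segments (hence the `<< 1` in the address math); nat_bitmap
         * selects the live copy.  Read the live copy from the old area and
         * write it to set #0 at the same relative position in the new area.
         */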
        for (nid = nm_i->max_nid - 1; nid >= 0; nid -= NAT_ENTRY_PER_BLOCK) {
                block_off = nid / NAT_ENTRY_PER_BLOCK;
                seg_off = block_off >> sbi->log_blocks_per_seg;
                block_addr = (pgoff_t)(old_nat_blkaddr +
                        (seg_off << sbi->log_blocks_per_seg << 1) +
                        (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

                /* move to set #0 */
                if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) {
                        block_addr += sbi->blocks_per_seg;
                        f2fs_clear_bit(block_off, nm_i->nat_bitmap);
                }

                ret = dev_read_block(nat_block, block_addr);
                ASSERT(ret >= 0);

                block_addr = (pgoff_t)(new_nat_blkaddr +
                        (seg_off << sbi->log_blocks_per_seg << 1) +
                        (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

                /* new bitmap should be zeros */
                ret = dev_write_block(nat_block, block_addr, WRITE_LIFE_NONE);
                ASSERT(ret >= 0);
        }
        /* zero out newly assigned nids */
        memset(nat_block, 0, F2FS_BLKSIZE);
        nat_blocks = get_newsb(segment_count_nat) >> 1;
        nat_blocks = nat_blocks << get_sb(log_blocks_per_seg);
        new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

        DBG(1, "Write NAT block: %x->%x, nat segments: %x->%x\n",
                        old_nat_blkaddr, new_nat_blkaddr,
                        get_sb(segment_count_nat),
                        get_newsb(segment_count_nat));

        for (nid = nm_i->max_nid; nid < new_max_nid;
                                nid += NAT_ENTRY_PER_BLOCK) {
                block_off = nid / NAT_ENTRY_PER_BLOCK;
                seg_off = block_off >> sbi->log_blocks_per_seg;
                block_addr = (pgoff_t)(new_nat_blkaddr +
                        (seg_off << sbi->log_blocks_per_seg << 1) +
                        (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
                ret = dev_write_block(nat_block, block_addr, WRITE_LIFE_NONE);
                ASSERT(ret >= 0);
                DBG(3, "Write NAT: %lx\n", block_addr);
        }
        free(nat_block);
        DBG(0, "Info: Finished migrating NAT blocks: nat_blkaddr = 0x%x -> 0x%x\n",
                        old_nat_blkaddr, new_nat_blkaddr);
}

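/*
 * Rebuild the SIT area at its new location: zero-fill all new SIT blocks
 * first, then write one entry per remaining main-area segment, shifted down
 * by `offset` segments.  Segments below `offset` must already be empty.
 */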
static void migrate_sit(struct f2fs_sb_info *sbi,
                struct f2fs_super_block *new_sb, unsigned int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int ofs = 0, pre_ofs = 0;
        unsigned int segno, index;
        struct f2fs_sit_block *sit_blk = calloc(F2FS_BLKSIZE, 1);
        block_t sit_blks = get_newsb(segment_count_sit) <<
                                        (sbi->log_blocks_per_seg - 1);
        struct seg_entry *se;
        block_t blk_addr = 0;
        int ret;

        ASSERT(sit_blk);

        /* initialize with zeros */
        for (index = 0; index < sit_blks; index++) {
                ret = dev_write_block(sit_blk, get_newsb(sit_blkaddr) + index,
                                WRITE_LIFE_NONE);
                ASSERT(ret >= 0);
                DBG(3, "Write zero sit: %x\n", get_newsb(sit_blkaddr) + index);
        }

        for (segno = 0; segno < MAIN_SEGS(sbi); segno++) {
                struct f2fs_sit_entry *sit;

                se = get_seg_entry(sbi, segno);
                if (segno < offset) {
                        ASSERT(se->valid_blocks == 0);
                        continue;
                }

                ofs = SIT_BLOCK_OFFSET(sit_i, segno - offset);

                if (ofs != pre_ofs) {
                        blk_addr = get_newsb(sit_blkaddr) + pre_ofs;
                        ret = dev_write_block(sit_blk, blk_addr,
                                        WRITE_LIFE_NONE);
                        ASSERT(ret >= 0);
                        DBG(1, "Write valid sit: %x\n", blk_addr);

                        pre_ofs = ofs;
                        memset(sit_blk, 0, F2FS_BLKSIZE);
                }

                sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno - offset)];
                memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
                sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
                                                        se->valid_blocks);
        }
        /* flush the last partially filled SIT block */
        blk_addr = get_newsb(sit_blkaddr) + ofs;
        ret = dev_write_block(sit_blk, blk_addr, WRITE_LIFE_NONE);
        ASSERT(ret >= 0);
        DBG(1, "Write valid sit: %x\n", blk_addr);

        free(sit_blk);
        DBG(0, "Info: Finished rebuilding SIT blocks at 0x%x\n",
                                        get_newsb(sit_blkaddr));
}

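/*
 * Write a checkpoint pack that matches the new superblock into the unused CP
 * set: recompute the overprovision/free/user counts, shift the current
 * segment numbers by `offset`, resize the version bitmaps, carry over any
 * orphan blocks, then invalidate the old checkpoint.
 */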
static void rebuild_checkpoint(struct f2fs_sb_info *sbi,
                        struct f2fs_super_block *new_sb, unsigned int offset)
{
        struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
        unsigned long long cp_ver = get_cp(checkpoint_ver);
        struct f2fs_checkpoint *new_cp;
        struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
        unsigned int free_segment_count, new_segment_count;
        block_t new_cp_blks = 1 + get_newsb(cp_payload);
        block_t orphan_blks = 0;
        block_t new_cp_blk_no, old_cp_blk_no;
        uint32_t crc = 0;
        u32 flags;
        void *buf;
        int i, ret;

        new_cp = calloc(new_cp_blks * F2FS_BLKSIZE, 1);
        ASSERT(new_cp);

        buf = malloc(F2FS_BLKSIZE);
        ASSERT(buf);

        /* ovp / free segments */
        set_cp(rsvd_segment_count, c.new_reserved_segments);
        set_cp(overprov_segment_count, (get_newsb(segment_count_main) -
                        get_cp(rsvd_segment_count)) *
                        c.new_overprovision / 100);

        /* give 2 sections (DATA and NODE) to trigger GC in advance */
        if (get_cp(overprov_segment_count) < get_cp(rsvd_segment_count))
                set_cp(overprov_segment_count, get_cp(rsvd_segment_count));

        set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
                                                2 * get_sb(segs_per_sec));

        DBG(0, "Info: Overprovision ratio = %.3lf%%\n", c.new_overprovision);
        DBG(0, "Info: Overprovision segments = %u (GC reserved = %u)\n",
                                        get_cp(overprov_segment_count),
                                        c.new_reserved_segments);

        free_segment_count = get_free_segments(sbi);
        new_segment_count = get_newsb(segment_count_main) -
                                        get_sb(segment_count_main);

        set_cp(free_segment_count, free_segment_count + new_segment_count);
        set_cp(user_block_count, ((get_newsb(segment_count_main) -
                        get_cp(overprov_segment_count)) * c.blks_per_seg));

        if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG))
                orphan_blks = __start_sum_addr(sbi) - 1;

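        /*
         * CP pack write order below: cp block | cp_payload | orphan blocks |
         * NR_CURSEG_TYPE summary blocks | cp block.  The constant 8 is the
         * two cp blocks plus the six summary blocks.
         */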
        set_cp(cp_pack_start_sum, 1 + get_newsb(cp_payload));
        set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_newsb(cp_payload));

        /* cur->segno - offset */
        for (i = 0; i < NO_CHECK_TYPE; i++) {
                if (i < CURSEG_HOT_NODE) {
                        set_cp(cur_data_segno[i],
                                        CURSEG_I(sbi, i)->segno - offset);
                } else {
                        int n = i - CURSEG_HOT_NODE;

                        set_cp(cur_node_segno[n],
                                        CURSEG_I(sbi, i)->segno - offset);
                }
        }

        /* sit / nat ver bitmap bytesize */
        set_cp(sit_ver_bitmap_bytesize,
                        ((get_newsb(segment_count_sit) / 2) <<
                        get_newsb(log_blocks_per_seg)) / 8);
        set_cp(nat_ver_bitmap_bytesize,
                        ((get_newsb(segment_count_nat) / 2) <<
                        get_newsb(log_blocks_per_seg)) / 8);

        /* update nat_bits flag */
        flags = update_nat_bits_flags(new_sb, cp, get_cp(ckpt_flags));
        if (c.large_nat_bitmap)
                flags |= CP_LARGE_NAT_BITMAP_FLAG;

        if (flags & CP_COMPACT_SUM_FLAG)
                flags &= ~CP_COMPACT_SUM_FLAG;
        if (flags & CP_LARGE_NAT_BITMAP_FLAG)
                set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
        else
                set_cp(checksum_offset, CP_CHKSUM_OFFSET);

        set_cp(ckpt_flags, flags);

        memcpy(new_cp, cp, (unsigned char *)cp->sit_nat_version_bitmap -
                                        (unsigned char *)cp);
        if (c.safe_resize)
                memcpy((void *)new_cp + CP_BITMAP_OFFSET,
                        (void *)cp + CP_BITMAP_OFFSET,
                        F2FS_BLKSIZE - CP_BITMAP_OFFSET);

        new_cp->checkpoint_ver = cpu_to_le64(cp_ver + 1);

        crc = f2fs_checkpoint_chksum(new_cp);
        *((__le32 *)((unsigned char *)new_cp + get_cp(checksum_offset))) =
                                                        cpu_to_le32(crc);

        /* Write a new checkpoint in the other set */
        new_cp_blk_no = old_cp_blk_no = get_sb(cp_blkaddr);
        if (sbi->cur_cp == 2)
                old_cp_blk_no += 1 << get_sb(log_blocks_per_seg);
        else
                new_cp_blk_no += 1 << get_sb(log_blocks_per_seg);

        /* write first cp */
        ret = dev_write_block(new_cp, new_cp_blk_no++, WRITE_LIFE_NONE);
        ASSERT(ret >= 0);

        memset(buf, 0, F2FS_BLKSIZE);
        for (i = 0; i < get_newsb(cp_payload); i++) {
                ret = dev_write_block(buf, new_cp_blk_no++, WRITE_LIFE_NONE);
                ASSERT(ret >= 0);
        }

        /*
         * Copy the orphan blocks from the old pack into the new one.  The
         * source address must be computed once, outside the loop; otherwise
         * every iteration would re-read the first orphan block.
         */
        if (orphan_blks) {
                block_t orphan_blk_no = old_cp_blk_no + 1 + get_sb(cp_payload);

                for (i = 0; i < orphan_blks; i++) {
                        ret = dev_read_block(buf, orphan_blk_no++);
                        ASSERT(ret >= 0);

                        ret = dev_write_block(buf, new_cp_blk_no++,
                                        WRITE_LIFE_NONE);
                        ASSERT(ret >= 0);
                }
        }

        /* update summary blocks having nullified journal entries */
        for (i = 0; i < NO_CHECK_TYPE; i++) {
                struct curseg_info *curseg = CURSEG_I(sbi, i);

                ret = dev_write_block(curseg->sum_blk, new_cp_blk_no++,
                                WRITE_LIFE_NONE);
                ASSERT(ret >= 0);
        }

        /* write the last cp */
        ret = dev_write_block(new_cp, new_cp_blk_no++, WRITE_LIFE_NONE);
        ASSERT(ret >= 0);

        /* Write nat bits */
        if (flags & CP_NAT_BITS_FLAG)
                write_nat_bits(sbi, new_sb, new_cp, sbi->cur_cp == 1 ? 2 : 1);

        /* disable old checkpoint */
        memset(buf, 0, F2FS_BLKSIZE);
        ret = dev_write_block(buf, old_cp_blk_no, WRITE_LIFE_NONE);
        ASSERT(ret >= 0);

        free(buf);
        free(new_cp);
        DBG(0, "Info: Finished rebuilding checkpoint blocks\n");
}

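/*
 * Verify that the valid blocks currently on the volume still fit into the
 * user block budget implied by the new layout's overprovisioning.
 */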
static int f2fs_resize_check(struct f2fs_sb_info *sbi, struct f2fs_super_block *new_sb)
{
        struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
        block_t user_block_count;
        unsigned int overprov_segment_count;

        overprov_segment_count = (get_newsb(segment_count_main) -
                                        c.new_reserved_segments) *
                                        c.new_overprovision / 100;

        overprov_segment_count += 2 * get_newsb(segs_per_sec);

        user_block_count = (get_newsb(segment_count_main) -
                        overprov_segment_count) * c.blks_per_seg;

        if (get_cp(valid_block_count) > user_block_count)
                return -1;

        return 0;
}

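/*
 * Grow the volume: the metadata areas expand, so the main area shifts
 * forward by `offset` blocks.  If the new main start still falls inside the
 * old main area, try an in-place defragmentation first and fall back to a
 * full block migration; then relocate SSA, NAT and SIT and rebuild the
 * checkpoint for the new geometry.
 */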
static int f2fs_resize_grow(struct f2fs_sb_info *sbi)
{
        struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
        struct f2fs_super_block new_sb_raw;
        struct f2fs_super_block *new_sb = &new_sb_raw;
        block_t end_blkaddr, old_main_blkaddr, new_main_blkaddr;
        unsigned int offset;
        unsigned int offset_seg = 0;
        int err = -1;

        /* flush NAT/SIT journal entries */
        flush_journal_entries(sbi);

        memcpy(new_sb, F2FS_RAW_SUPER(sbi), sizeof(*new_sb));
        if (get_new_sb(new_sb))
                return -1;

        if (f2fs_resize_check(sbi, new_sb) < 0)
                return -1;

        /* check nat availability */
        if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) {
                err = shrink_nats(sbi, new_sb);
                if (err) {
                        MSG(0, "\tError: Failed to shrink NATs\n");
                        return err;
                }
        }

        old_main_blkaddr = get_sb(main_blkaddr);
        new_main_blkaddr = get_newsb(main_blkaddr);
        offset = new_main_blkaddr - old_main_blkaddr;
        end_blkaddr = (get_sb(segment_count_main) <<
                        get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);

        err = -EAGAIN;
        if (new_main_blkaddr < end_blkaddr) {
                err = f2fs_defragment(sbi, old_main_blkaddr, offset,
                                                new_main_blkaddr, 0);
                if (!err)
                        offset_seg = offset >> get_sb(log_blocks_per_seg);
                MSG(0, "Try to defragment: %s\n", err ? "Skip" : "Done");
        }
        /* move whole data region */
        if (err)
                migrate_main(sbi, offset);

        migrate_ssa(sbi, new_sb, offset_seg);
        migrate_nat(sbi, new_sb);
        migrate_sit(sbi, new_sb, offset_seg);
        rebuild_checkpoint(sbi, new_sb, offset_seg);
        update_superblock(new_sb, SB_MASK_ALL);
        print_raw_sb_info(sb);
        print_raw_sb_info(new_sb);

        return 0;
}

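/*
 * Shrink the volume: defragment any blocks that lie beyond the new end of
 * the main area back into the remaining space, then rewrite the superblock
 * and checkpoint.  Fails with -ENOSPC when the surviving space cannot hold
 * the data.
 */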
static int f2fs_resize_shrink(struct f2fs_sb_info *sbi)
{
        struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
        struct f2fs_super_block new_sb_raw;
        struct f2fs_super_block *new_sb = &new_sb_raw;
        block_t old_end_blkaddr, old_main_blkaddr;
        block_t new_end_blkaddr, new_main_blkaddr, tmp_end_blkaddr;
        unsigned int offset;
        int err = -1;

        /* flush NAT/SIT journal entries */
        flush_journal_entries(sbi);

        memcpy(new_sb, F2FS_RAW_SUPER(sbi), sizeof(*new_sb));
        if (get_new_sb(new_sb))
                return -1;

        if (f2fs_resize_check(sbi, new_sb) < 0)
                return -1;

        /* check nat availability */
        if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) {
                err = shrink_nats(sbi, new_sb);
                if (err) {
                        MSG(0, "\tError: Failed to shrink NATs\n");
                        return err;
                }
        }

        old_main_blkaddr = get_sb(main_blkaddr);
        new_main_blkaddr = get_newsb(main_blkaddr);
        offset = old_main_blkaddr - new_main_blkaddr;
        old_end_blkaddr = (get_sb(segment_count_main) <<
                        get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
        new_end_blkaddr = (get_newsb(segment_count_main) <<
                        get_newsb(log_blocks_per_seg)) + get_newsb(main_blkaddr);

        tmp_end_blkaddr = new_end_blkaddr + offset;
        err = f2fs_defragment(sbi, tmp_end_blkaddr,
                                old_end_blkaddr - tmp_end_blkaddr,
                                tmp_end_blkaddr, 1);
        MSG(0, "Try to defragment: %s\n", err ? "Insufficient Space" : "Done");

        if (err)
                return -ENOSPC;

        update_superblock(new_sb, SB_MASK_ALL);
        rebuild_checkpoint(sbi, new_sb, 0);
        /*if (!c.safe_resize) {
                migrate_sit(sbi, new_sb, offset_seg);
                migrate_nat(sbi, new_sb);
                migrate_ssa(sbi, new_sb, offset_seg);
        }*/

        /* move whole data region */
        //if (err)
        //      migrate_main(sbi, offset);
        print_raw_sb_info(sb);
        print_raw_sb_info(new_sb);

        return 0;
}

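/*
 * Entry point: compare the requested size (converted to blocks; the device
 * sector size may differ from the block size) with the current block count
 * and dispatch to shrink or grow.
 */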
int f2fs_resize(struct f2fs_sb_info *sbi)
{
        struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);

        /* the sector size may differ from the block size */
        if ((c.target_sectors * c.sector_size >>
                get_sb(log_blocksize)) < get_sb(block_count)) {
                if (!c.safe_resize) {
                        ASSERT_MSG("Nothing to resize; shrinking is only "
                                "supported with the safe resize flag\n");
                        return -1;
                } else {
                        return f2fs_resize_shrink(sbi);
                }
        } else if (((c.target_sectors * c.sector_size >>
                get_sb(log_blocksize)) > get_sb(block_count)) ||
                c.force) {
                return f2fs_resize_grow(sbi);
        } else {
                MSG(0, "Nothing to resize.\n");
                return 0;
        }
}
