Lines Matching full:sbi (fs/f2fs/gc.c)

33 struct f2fs_sb_info *sbi = data; in gc_thread_func() local
34 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func()
35 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func()
36 wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq; in gc_thread_func()
55 if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) in gc_thread_func()
62 if (f2fs_readonly(sbi->sb)) { in gc_thread_func()
63 stat_other_skip_bggc_count(sbi); in gc_thread_func()
69 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func()
71 stat_other_skip_bggc_count(sbi); in gc_thread_func()
75 if (time_to_inject(sbi, FAULT_CHECKPOINT)) in gc_thread_func()
76 f2fs_stop_checkpoint(sbi, false, in gc_thread_func()
79 if (!sb_start_write_trylock(sbi->sb)) { in gc_thread_func()
80 stat_other_skip_bggc_count(sbi); in gc_thread_func()
99 if (sbi->gc_mode == GC_URGENT_HIGH || in gc_thread_func()
100 sbi->gc_mode == GC_URGENT_MID) { in gc_thread_func()
102 f2fs_down_write(&sbi->gc_lock); in gc_thread_func()
107 f2fs_down_write(&sbi->gc_lock); in gc_thread_func()
109 } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) { in gc_thread_func()
110 stat_other_skip_bggc_count(sbi); in gc_thread_func()
114 if (!is_idle(sbi, GC_TIME)) { in gc_thread_func()
116 f2fs_up_write(&sbi->gc_lock); in gc_thread_func()
117 stat_io_skip_bggc_count(sbi); in gc_thread_func()
121 if (f2fs_sb_has_blkzoned(sbi)) { in gc_thread_func()
122 if (has_enough_free_blocks(sbi, in gc_thread_func()
125 f2fs_up_write(&sbi->gc_lock); in gc_thread_func()
132 if (need_to_boost_gc(sbi)) { in gc_thread_func()
134 if (f2fs_sb_has_blkzoned(sbi)) in gc_thread_func()
140 stat_inc_gc_call_count(sbi, foreground ? in gc_thread_func()
143 sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) || in gc_thread_func()
155 if (f2fs_gc(sbi, &gc_control)) { in gc_thread_func()
168 trace_f2fs_background_gc(sbi->sb, wait_ms, in gc_thread_func()
169 prefree_segments(sbi), free_segments(sbi)); in gc_thread_func()
172 f2fs_balance_fs_bg(sbi, true); in gc_thread_func()
174 if (sbi->gc_mode != GC_NORMAL) { in gc_thread_func()
175 spin_lock(&sbi->gc_remaining_trials_lock); in gc_thread_func()
176 if (sbi->gc_remaining_trials) { in gc_thread_func()
177 sbi->gc_remaining_trials--; in gc_thread_func()
178 if (!sbi->gc_remaining_trials) in gc_thread_func()
179 sbi->gc_mode = GC_NORMAL; in gc_thread_func()
181 spin_unlock(&sbi->gc_remaining_trials_lock); in gc_thread_func()
183 sb_end_write(sbi->sb); in gc_thread_func()
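The gc_thread_func() matches above outline the background GC kthread: wake on a timeout or a foreground request, bail out while the filesystem is read-only, frozen, or busy, take gc_lock (unconditionally in the urgent modes, trylock otherwise), and stretch or shrink the sleep interval based on how productive the pass was. A minimal userspace sketch of that loop; fs_readonly, fs_idle, gc_trylock, and run_one_gc_pass are hypothetical stand-ins for the f2fs internals:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical stand-ins for the f2fs state probed in gc_thread_func(). */
    static bool fs_readonly(void)     { return false; }
    static bool fs_idle(void)         { return true; }
    static bool gc_trylock(void)      { return true; }
    static void gc_unlock(void)       { }
    static bool run_one_gc_pass(void) { return true; } /* freed a section? */

    int main(void)
    {
        /* kernel defaults are 30s/60s (DEF_GC_THREAD_*_SLEEP_TIME);
         * scaled down to milliseconds so the demo finishes quickly */
        const unsigned int min_ms = 30, max_ms = 60;
        unsigned int wait_ms = min_ms;

        for (int pass = 0; pass < 3; pass++) {  /* kthread_should_stop() loop */
            usleep(wait_ms * 1000);             /* wait_event_*_timeout() */

            if (fs_readonly())
                continue;                       /* stat_other_skip_bggc_count() */
            if (!gc_trylock())
                continue;                       /* another collector owns gc_lock */
            if (!fs_idle()) {
                gc_unlock();                    /* stat_io_skip_bggc_count() */
                continue;
            }

            /* a productive pass shortens the next sleep, a fruitless one
             * lengthens it (decrease/increase_sleep_time() in the kernel) */
            wait_ms = run_one_gc_pass() ? min_ms : max_ms;
            gc_unlock();  /* in the kernel, f2fs_gc() itself drops gc_lock */
            printf("pass %d done, next wait %u ms\n", pass, wait_ms);
        }
        return 0;
    }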
189 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi) in f2fs_start_gc_thread() argument
192 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_start_gc_thread()
194 gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL); in f2fs_start_gc_thread()
201 if (f2fs_sb_has_blkzoned(sbi)) { in f2fs_start_gc_thread()
217 sbi->gc_thread = gc_th; in f2fs_start_gc_thread()
218 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); in f2fs_start_gc_thread()
219 init_waitqueue_head(&sbi->gc_thread->fggc_wq); in f2fs_start_gc_thread()
220 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, in f2fs_start_gc_thread()
226 sbi->gc_thread = NULL; in f2fs_start_gc_thread()
233 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi) in f2fs_stop_gc_thread() argument
235 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in f2fs_stop_gc_thread()
242 sbi->gc_thread = NULL; in f2fs_stop_gc_thread()
245 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type) in select_gc_type() argument
250 if (sbi->am.atgc_enabled) in select_gc_type()
258 switch (sbi->gc_mode) { in select_gc_type()
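select_gc_type() turns the pair (sysfs gc_mode, BG/FG gc_type) into one of the victim policies GC_CB, GC_GREEDY, or GC_AT. A paraphrase of that mapping, built from the matches above (the atgc_enabled test is the line 250 hit); treat the exact case list as an assumption against your kernel version:

    #include <stdio.h>

    /* Paraphrase of select_gc_type(): background GC defaults to cost-benefit
     * (or ATGC when am.atgc_enabled), foreground GC to greedy, and the sysfs
     * gc_mode can override the default. */
    enum policy { GC_CB, GC_GREEDY, GC_AT };
    enum mode   { GC_NORMAL, GC_IDLE_CB, GC_IDLE_GREEDY, GC_IDLE_AT,
                  GC_URGENT_HIGH };
    enum kind   { FG_GC, BG_GC };

    static enum policy select_gc_type_model(enum mode gc_mode, enum kind gc_type,
                                            int atgc_enabled)
    {
        enum policy p = (gc_type == BG_GC)
                        ? (atgc_enabled ? GC_AT : GC_CB)
                        : GC_GREEDY;

        switch (gc_mode) {
        case GC_IDLE_CB:     return GC_CB;
        case GC_IDLE_GREEDY:
        case GC_URGENT_HIGH: return GC_GREEDY;
        case GC_IDLE_AT:     return GC_AT;
        default:             return p;
        }
    }

    int main(void)
    {
        printf("BG, normal, no ATGC -> %d (expect GC_CB)\n",
               select_gc_type_model(GC_NORMAL, BG_GC, 0));
        printf("BG, urgent-high     -> %d (expect GC_GREEDY)\n",
               select_gc_type_model(GC_URGENT_HIGH, BG_GC, 0));
        return 0;
    }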
276 static void select_policy(struct f2fs_sb_info *sbi, int gc_type, in select_policy() argument
279 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in select_policy()
292 p->gc_mode = select_gc_type(sbi, gc_type); in select_policy()
293 p->ofs_unit = SEGS_PER_SEC(sbi); in select_policy()
294 if (__is_large_section(sbi)) { in select_policy()
297 0, MAIN_SECS(sbi)); in select_policy()
309 (sbi->gc_mode != GC_URGENT_HIGH) && in select_policy()
311 p->max_search > sbi->max_victim_search) in select_policy()
312 p->max_search = sbi->max_victim_search; in select_policy()
315 if (f2fs_need_rand_seg(sbi)) in select_policy()
316 p->offset = get_random_u32_below(MAIN_SECS(sbi) * in select_policy()
317 SEGS_PER_SEC(sbi)); in select_policy()
321 p->offset = SIT_I(sbi)->last_victim[p->gc_mode]; in select_policy()
324 static unsigned int get_max_cost(struct f2fs_sb_info *sbi, in get_max_cost() argument
329 return BLKS_PER_SEG(sbi); in get_max_cost()
335 return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit); in get_max_cost()
344 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) in check_bg_victims() argument
346 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in check_bg_victims()
354 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { in check_bg_victims()
355 if (sec_usage_check(sbi, secno)) in check_bg_victims()
358 return GET_SEG_FROM_SEC(sbi, secno); in check_bg_victims()
363 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) in get_cb_cost() argument
365 struct sit_info *sit_i = SIT_I(sbi); in get_cb_cost()
370 unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi); in get_cb_cost()
372 mtime = f2fs_get_section_mtime(sbi, segno); in get_cb_cost()
373 f2fs_bug_on(sbi, mtime == INVALID_MTIME); in get_cb_cost()
374 vblocks = get_valid_blocks(sbi, segno, true); in get_cb_cost()
377 u = BLKS_TO_SEGS(sbi, vblocks * 100); in get_cb_cost()
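The get_cb_cost() hits compute the classic LFS cost-benefit score: u is the section's valid-block percentage (the line 377 match), age is the section mtime normalized to 0..100 against sit_i's min/max mtimes, and the result is subtracted from UINT_MAX so the caller's min-cost search still works. The formula, recomputed standalone:

    #include <limits.h>
    #include <stdio.h>

    /* Cost-benefit score as in get_cb_cost(): age favors long-idle sections,
     * (100 - u) favors mostly-invalid ones, (100 + u) charges the copy cost,
     * and the UINT_MAX inversion keeps "smaller cost = better victim". */
    static unsigned int cb_cost(unsigned int u /* % valid */,
                                unsigned int age /* 0..100, from mtime */)
    {
        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
    }

    int main(void)
    {
        printf("hot, nearly full : %u\n", cb_cost(90, 10));
        printf("cold, mostly free: %u\n", cb_cost(10, 90)); /* smaller: wins */
        return 0;
    }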
391 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, in get_gc_cost() argument
395 return get_seg_entry(sbi, segno)->ckpt_valid_blocks; in get_gc_cost()
397 if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >= in get_gc_cost()
398 CAP_BLKS_PER_SEC(sbi) * sbi->gc_thread->valid_thresh_ratio / in get_gc_cost()
404 return get_valid_blocks(sbi, segno, true); in get_gc_cost()
406 return get_cb_cost(sbi, segno); in get_gc_cost()
408 f2fs_bug_on(sbi, 1); in get_gc_cost()
424 static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi, in f2fs_check_victim_tree() argument
440 f2fs_info(sbi, "broken victim_rbtree, " in f2fs_check_victim_tree()
451 static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi, in __lookup_victim_entry() argument
454 struct atgc_management *am = &sbi->am; in __lookup_victim_entry()
469 static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi, in __create_victim_entry() argument
472 struct atgc_management *am = &sbi->am; in __create_victim_entry()
486 static void __insert_victim_entry(struct f2fs_sb_info *sbi, in __insert_victim_entry() argument
489 struct atgc_management *am = &sbi->am; in __insert_victim_entry()
509 ve = __create_victim_entry(sbi, mtime, segno); in __insert_victim_entry()
515 static void add_victim_entry(struct f2fs_sb_info *sbi, in add_victim_entry() argument
518 struct sit_info *sit_i = SIT_I(sbi); in add_victim_entry()
521 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in add_victim_entry()
523 get_valid_blocks(sbi, segno, true) == 0) in add_victim_entry()
527 mtime = f2fs_get_section_mtime(sbi, segno); in add_victim_entry()
528 f2fs_bug_on(sbi, mtime == INVALID_MTIME); in add_victim_entry()
544 __insert_victim_entry(sbi, mtime, segno); in add_victim_entry()
547 static void atgc_lookup_victim(struct f2fs_sb_info *sbi, in atgc_lookup_victim() argument
550 struct sit_info *sit_i = SIT_I(sbi); in atgc_lookup_victim()
551 struct atgc_management *am = &sbi->am; in atgc_lookup_victim()
559 unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi); in atgc_lookup_victim()
591 vblocks = get_valid_blocks(sbi, ve->segno, true); in atgc_lookup_victim()
592 f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks); in atgc_lookup_victim()
598 f2fs_bug_on(sbi, age + u >= UINT_MAX); in atgc_lookup_victim()
620 static void atssr_lookup_victim(struct f2fs_sb_info *sbi, in atssr_lookup_victim() argument
623 struct sit_info *sit_i = SIT_I(sbi); in atssr_lookup_victim()
624 struct atgc_management *am = &sbi->am; in atssr_lookup_victim()
641 ve = __lookup_victim_entry(sbi, p->age); in atssr_lookup_victim()
654 vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks; in atssr_lookup_victim()
655 f2fs_bug_on(sbi, !vblocks); in atssr_lookup_victim()
658 if (vblocks == BLKS_PER_SEG(sbi)) in atssr_lookup_victim()
684 static void lookup_victim_by_age(struct f2fs_sb_info *sbi, in lookup_victim_by_age() argument
687 f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root)); in lookup_victim_by_age()
690 atgc_lookup_victim(sbi, p); in lookup_victim_by_age()
692 atssr_lookup_victim(sbi, p); in lookup_victim_by_age()
694 f2fs_bug_on(sbi, 1); in lookup_victim_by_age()
697 static void release_victim_entry(struct f2fs_sb_info *sbi) in release_victim_entry() argument
699 struct atgc_management *am = &sbi->am; in release_victim_entry()
710 f2fs_bug_on(sbi, am->victim_count); in release_victim_entry()
711 f2fs_bug_on(sbi, !list_empty(&am->victim_list)); in release_victim_entry()
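__lookup_victim_entry(), __create_victim_entry(), __insert_victim_entry(), add_victim_entry(), and release_victim_entry() maintain ATGC's candidate set: every dirty section becomes a victim_entry keyed by mtime, kept in both an rbtree (for the ordered age lookups in atgc/atssr_lookup_victim()) and a plain list (for bulk free). A toy model of the same idea, with a qsort'ed array openly standing in for the rbtree:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model of the ATGC victim set: entries keyed by section mtime.
     * The kernel keeps an rbtree plus a list; a sorted array stands in
     * for the rbtree here. */
    struct victim_entry {
        unsigned long long mtime;
        unsigned int segno;
    };

    static int by_mtime(const void *a, const void *b)
    {
        const struct victim_entry *x = a, *y = b;

        return (x->mtime > y->mtime) - (x->mtime < y->mtime);
    }

    int main(void)
    {
        struct victim_entry v[] = {            /* add_victim_entry() calls */
            { 900, 12 }, { 100, 3 }, { 400, 7 },
        };
        size_t n = sizeof(v) / sizeof(v[0]);

        qsort(v, n, sizeof(v[0]), by_mtime);   /* __insert_victim_entry() order */
        for (size_t i = 0; i < n; i++)         /* oldest-first, as ATGC wants */
            printf("mtime=%llu segno=%u\n", v[i].mtime, v[i].segno);
        return 0;
    }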
714 static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno) in f2fs_pin_section() argument
716 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in f2fs_pin_section()
717 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in f2fs_pin_section()
739 static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable) in f2fs_unpin_all_sections() argument
741 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); in f2fs_unpin_all_sections()
743 if (f2fs_pinned_section_exists(DIRTY_I(sbi))) { in f2fs_unpin_all_sections()
744 memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size); in f2fs_unpin_all_sections()
745 DIRTY_I(sbi)->pinned_secmap_cnt = 0; in f2fs_unpin_all_sections()
747 DIRTY_I(sbi)->enable_pin_section = enable; in f2fs_unpin_all_sections()
770 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, in f2fs_get_victim() argument
774 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in f2fs_get_victim()
775 struct sit_info *sm = SIT_I(sbi); in f2fs_get_victim()
784 last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi); in f2fs_get_victim()
788 p.age_threshold = sbi->am.age_threshold; in f2fs_get_victim()
792 select_policy(sbi, gc_type, type, &p); in f2fs_get_victim()
795 p.min_cost = get_max_cost(sbi, &p); in f2fs_get_victim()
801 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX; in f2fs_get_victim()
804 if (!get_valid_blocks(sbi, *result, false)) { in f2fs_get_victim()
809 if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) { in f2fs_get_victim()
814 clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap); in f2fs_get_victim()
823 if (__is_large_section(sbi) && p.alloc_mode == LFS) { in f2fs_get_victim()
824 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) { in f2fs_get_victim()
825 p.min_segno = sbi->next_victim_seg[BG_GC]; in f2fs_get_victim()
827 sbi->next_victim_seg[BG_GC] = NULL_SEGNO; in f2fs_get_victim()
831 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) { in f2fs_get_victim()
832 p.min_segno = sbi->next_victim_seg[FG_GC]; in f2fs_get_victim()
834 sbi->next_victim_seg[FG_GC] = NULL_SEGNO; in f2fs_get_victim()
841 p.min_segno = check_bg_victims(sbi); in f2fs_get_victim()
879 secno = GET_SEC_FROM_SEG(sbi, segno); in f2fs_get_victim()
881 if (sec_usage_check(sbi, secno)) in f2fs_get_victim()
885 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in f2fs_get_victim()
891 if (get_ckpt_valid_blocks(sbi, segno, true)) in f2fs_get_victim()
899 if (!f2fs_segment_has_free_slot(sbi, segno)) in f2fs_get_victim()
911 add_victim_entry(sbi, &p, segno); in f2fs_get_victim()
915 cost = get_gc_cost(sbi, segno, &p); in f2fs_get_victim()
929 (MAIN_SECS(sbi) * SEGS_PER_SEC(sbi)); in f2fs_get_victim()
936 lookup_victim_by_age(sbi, &p); in f2fs_get_victim()
937 release_victim_entry(sbi); in f2fs_get_victim()
951 secno = GET_SEC_FROM_SEG(sbi, p.min_segno); in f2fs_get_victim()
953 sbi->cur_victim_sec = secno; in f2fs_get_victim()
962 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, in f2fs_get_victim()
963 sbi->cur_victim_sec, in f2fs_get_victim()
964 prefree_segments(sbi), free_segments(sbi)); in f2fs_get_victim()
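All of the f2fs_get_victim() matches serve one loop: scan candidate segments, skip sections that are in use or pinned (the sec_usage_check() hit at line 881), score each with get_gc_cost(), and remember the minimum. With policy selection, ATGC, and locking stripped away, the skeleton is a plain min-scan; cost_of() below is a hypothetical stand-in for get_gc_cost() under the greedy policy:

    #include <stdio.h>

    #define NULL_SEGNO (~0u)

    /* Greedy policy: the cost is just the number of valid blocks to copy. */
    static unsigned int cost_of(unsigned int valid_blocks)
    {
        return valid_blocks;
    }

    int main(void)
    {
        unsigned int valid[] = { 480, 12, 300, 7, 512 }; /* dirty segments */
        unsigned int min_segno = NULL_SEGNO, min_cost = ~0u;

        for (unsigned int segno = 0; segno < 5; segno++) {
            unsigned int cost = cost_of(valid[segno]);

            /* sec_usage_check()/pinned-section skips would happen here */
            if (cost < min_cost) {
                min_cost = cost;
                min_segno = segno;
            }
        }
        printf("victim segno=%u cost=%u\n", min_segno, min_cost);
        return 0;
    }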
1008 static int check_valid_map(struct f2fs_sb_info *sbi, in check_valid_map() argument
1011 struct sit_info *sit_i = SIT_I(sbi); in check_valid_map()
1016 sentry = get_seg_entry(sbi, segno); in check_valid_map()
1027 static int gc_node_segment(struct f2fs_sb_info *sbi, in gc_node_segment() argument
1036 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); in gc_node_segment()
1038 start_addr = START_BLOCK(sbi, segno); in gc_node_segment()
1044 atomic_inc(&sbi->wb_sync_req[NODE]); in gc_node_segment()
1053 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) in gc_node_segment()
1056 if (check_valid_map(sbi, segno, off) == 0) in gc_node_segment()
1060 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, in gc_node_segment()
1066 f2fs_ra_node_page(sbi, nid); in gc_node_segment()
1071 node_page = f2fs_get_node_page(sbi, nid); in gc_node_segment()
1076 if (check_valid_map(sbi, segno, off) == 0) { in gc_node_segment()
1081 if (f2fs_get_node_info(sbi, nid, &ni, false)) { in gc_node_segment()
1094 stat_inc_node_blk_count(sbi, 1, gc_type); in gc_node_segment()
1101 atomic_dec(&sbi->wb_sync_req[NODE]); in gc_node_segment()
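gc_node_segment() visits the segment's summary entries several times: an early phase readaheads NAT blocks, the next readaheads the node pages themselves, and the final phase re-validates each entry with check_valid_map() and migrates it. The phase loop, compressed into a sketch with stub helpers:

    #include <stdio.h>

    /* Phase-structured scan as in gc_node_segment(): the same summary entries
     * are visited several times, so metadata readahead is already in flight
     * by the time the migration pass runs. */
    enum { PH_RA_NAT, PH_RA_NODE, PH_MOVE, PH_MAX };

    static void ra_nat(unsigned int nid)    { (void)nid; } /* f2fs_ra_meta_pages() */
    static void ra_node(unsigned int nid)   { (void)nid; } /* f2fs_ra_node_page() */
    static void move_node(unsigned int nid) { printf("move nid %u\n", nid); }

    static void gc_node_segment_model(const unsigned int *nids, unsigned int n)
    {
        for (int phase = PH_RA_NAT; phase < PH_MAX; phase++)
            for (unsigned int off = 0; off < n; off++) {
                /* check_valid_map() skips entries whose block went stale */
                if (phase == PH_RA_NAT)
                    ra_nat(nids[off]);
                else if (phase == PH_RA_NODE)
                    ra_node(nids[off]);
                else
                    move_node(nids[off]);
            }
    }

    int main(void)
    {
        unsigned int nids[] = { 11, 12, 13 };

        gc_node_segment_model(nids, 3);
        return 0;
    }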
1134 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in is_alive() argument
1145 node_page = f2fs_get_node_page(sbi, nid); in is_alive()
1149 if (f2fs_get_node_info(sbi, nid, dni, false)) { in is_alive()
1155 f2fs_warn(sbi, "%s: valid data with mismatched node version.", in is_alive()
1157 set_sbi_flag(sbi, SBI_NEED_FSCK); in is_alive()
1160 if (f2fs_check_nid_range(sbi, dni->ino)) { in is_alive()
1174 f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u", in is_alive()
1186 unsigned int segno = GET_SEGNO(sbi, blkaddr); in is_alive()
1187 unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); in is_alive()
1189 if (unlikely(check_valid_map(sbi, segno, offset))) { in is_alive()
1190 if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) { in is_alive()
1191 f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u", in is_alive()
1193 set_sbi_flag(sbi, SBI_NEED_FSCK); in is_alive()
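is_alive() is the safety check behind data GC: the SSA summary names the owning node (nid) and slot (ofs_in_node), and a block may only be moved if that node still points back at the block address being collected; on mismatch the kernel logs it and sets SBI_NEED_FSCK (the lines 1189-1193 hits). The back-pointer check in miniature:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of is_alive(): the SSA summary claims "node nid, slot ofs_in_node
     * owns this block"; the block is live only if the node still points back. */
    struct node {
        unsigned int blkaddr[5];   /* data block addresses stored in the node */
    };

    static bool is_alive_model(const struct node *n, unsigned int ofs_in_node,
                               unsigned int blkaddr)
    {
        return n->blkaddr[ofs_in_node] == blkaddr;
    }

    int main(void)
    {
        struct node n = { { 100, 101, 0, 103, 104 } };

        printf("slot 1 vs 101: %s\n", is_alive_model(&n, 1, 101) ? "alive" : "stale");
        printf("slot 2 vs 102: %s\n", is_alive_model(&n, 2, 102) ? "alive" : "stale");
        return 0;
    }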
1204 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in ra_data_block() local
1210 .sbi = sbi, in ra_data_block()
1227 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, in ra_data_block()
1245 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, in ra_data_block()
1263 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi), in ra_data_block()
1277 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE); in ra_data_block()
1278 f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE); in ra_data_block()
1298 .sbi = F2FS_I_SB(inode), in move_data_block()
1313 bool lfs_mode = f2fs_lfs_mode(fio.sbi); in move_data_block()
1314 int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) && in move_data_block()
1315 (fio.sbi->gc_mode != GC_URGENT_HIGH) ? in move_data_block()
1351 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false); in move_data_block()
1360 f2fs_down_write(&fio.sbi->io_order_lock); in move_data_block()
1362 mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), in move_data_block()
1379 f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO, in move_data_block()
1381 f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO, in move_data_block()
1385 if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || in move_data_block()
1396 err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, in move_data_block()
1404 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), in move_data_block()
1418 f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1); in move_data_block()
1422 dec_page_count(fio.sbi, F2FS_DIRTY_META); in move_data_block()
1431 f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE); in move_data_block()
1439 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, in move_data_block()
1443 f2fs_up_write(&fio.sbi->io_order_lock); in move_data_block()
1479 .sbi = F2FS_I_SB(inode), in move_data_page()
1527 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in gc_data_segment() argument
1531 struct super_block *sb = sbi->sb; in gc_data_segment()
1537 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); in gc_data_segment()
1539 start_addr = START_BLOCK(sbi, segno); in gc_data_segment()
1557 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) || in gc_data_segment()
1558 (!force_migrate && get_valid_blocks(sbi, segno, true) == in gc_data_segment()
1559 CAP_BLKS_PER_SEC(sbi))) in gc_data_segment()
1562 if (check_valid_map(sbi, segno, off) == 0) in gc_data_segment()
1566 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, in gc_data_segment()
1572 f2fs_ra_node_page(sbi, nid); in gc_data_segment()
1577 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs)) in gc_data_segment()
1581 f2fs_ra_node_page(sbi, dni.ino); in gc_data_segment()
1602 set_sbi_flag(sbi, SBI_NEED_FSCK); in gc_data_segment()
1603 f2fs_err_ratelimited(sbi, in gc_data_segment()
1619 sbi->skipped_gc_rwsem++; in gc_data_segment()
1660 sbi->skipped_gc_rwsem++; in gc_data_segment()
1665 sbi->skipped_gc_rwsem++; in gc_data_segment()
1693 stat_inc_data_blk_count(sbi, 1, gc_type); in gc_data_segment()
1703 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, in __get_victim() argument
1706 struct sit_info *sit_i = SIT_I(sbi); in __get_victim()
1710 ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, in __get_victim()
1716 static int do_garbage_collect(struct f2fs_sb_info *sbi, in do_garbage_collect() argument
1725 unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi); in do_garbage_collect()
1728 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ? in do_garbage_collect()
1733 if (__is_large_section(sbi)) { in do_garbage_collect()
1734 sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi)); in do_garbage_collect()
1742 if (f2fs_sb_has_blkzoned(sbi)) in do_garbage_collect()
1743 sec_end_segno -= SEGS_PER_SEC(sbi) - in do_garbage_collect()
1744 f2fs_usable_segs_in_sec(sbi); in do_garbage_collect()
1748 sbi->migration_window_granularity; in do_garbage_collect()
1750 if (f2fs_sb_has_blkzoned(sbi) && in do_garbage_collect()
1751 !has_enough_free_blocks(sbi, in do_garbage_collect()
1752 sbi->gc_thread->boost_zoned_gc_percent)) in do_garbage_collect()
1763 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type); in do_garbage_collect()
1766 if (__is_large_section(sbi)) in do_garbage_collect()
1767 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), in do_garbage_collect()
1772 sum_page = f2fs_get_sum_page(sbi, segno++); in do_garbage_collect()
1778 sum_page = find_get_page(META_MAPPING(sbi), in do_garbage_collect()
1779 GET_SUM_BLOCK(sbi, segno)); in do_garbage_collect()
1793 sum_page = find_get_page(META_MAPPING(sbi), in do_garbage_collect()
1794 GET_SUM_BLOCK(sbi, segno)); in do_garbage_collect()
1797 if (get_valid_blocks(sbi, segno, false) == 0) in do_garbage_collect()
1799 if (gc_type == BG_GC && __is_large_section(sbi) && in do_garbage_collect()
1800 migrated >= sbi->migration_granularity) in do_garbage_collect()
1802 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi))) in do_garbage_collect()
1807 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT", in do_garbage_collect()
1809 f2fs_stop_checkpoint(sbi, false, in do_garbage_collect()
1822 submitted += gc_node_segment(sbi, sum->entries, segno, in do_garbage_collect()
1825 submitted += gc_data_segment(sbi, sum->entries, gc_list, in do_garbage_collect()
1829 stat_inc_gc_seg_count(sbi, data_type, gc_type); in do_garbage_collect()
1830 sbi->gc_reclaimed_segs[sbi->gc_mode]++; in do_garbage_collect()
1835 get_valid_blocks(sbi, segno, false) == 0) in do_garbage_collect()
1838 if (__is_large_section(sbi)) in do_garbage_collect()
1839 sbi->next_victim_seg[gc_type] = in do_garbage_collect()
1847 f2fs_submit_merged_write(sbi, data_type); in do_garbage_collect()
1852 stat_inc_gc_sec_count(sbi, data_type, gc_type); in do_garbage_collect()
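do_garbage_collect() ties the collectors together: for every segment in the victim section it fetches the summary block, verifies that SSA and SIT agree on the segment type (the line 1807 hit stops the checkpoint on mismatch), then hands node segments to gc_node_segment() and data segments to gc_data_segment(). A skeleton of that dispatch with stub collectors:

    #include <stdio.h>

    /* Per-section dispatch as in do_garbage_collect(): one victim section,
     * SEGS_PER_SEC segments, node vs. data handled by different collectors. */
    enum seg_type { SEG_NODE, SEG_DATA };

    static int gc_node_seg(unsigned int segno) { (void)segno; return 1; }
    static int gc_data_seg(unsigned int segno) { (void)segno; return 1; }

    static int do_garbage_collect_model(unsigned int start_segno,
                                        unsigned int segs_per_sec,
                                        const enum seg_type *type_of)
    {
        int seg_freed = 0;

        for (unsigned int segno = start_segno;
             segno < start_segno + segs_per_sec; segno++) {
            /* real code skips segments with no valid blocks and checks
             * that the SSA and SIT agree on the segment type */
            if (type_of[segno] == SEG_NODE)
                seg_freed += gc_node_seg(segno);
            else
                seg_freed += gc_data_seg(segno);
        }
        return seg_freed;  /* the section counts as freed if all segs freed */
    }

    int main(void)
    {
        enum seg_type types[4] = { SEG_NODE, SEG_DATA, SEG_DATA, SEG_NODE };

        printf("segments freed: %d\n", do_garbage_collect_model(0, 4, types));
        return 0;
    }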
1857 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control) in f2fs_gc() argument
1871 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc, in f2fs_gc()
1873 get_pages(sbi, F2FS_DIRTY_NODES), in f2fs_gc()
1874 get_pages(sbi, F2FS_DIRTY_DENTS), in f2fs_gc()
1875 get_pages(sbi, F2FS_DIRTY_IMETA), in f2fs_gc()
1876 free_sections(sbi), in f2fs_gc()
1877 free_segments(sbi), in f2fs_gc()
1878 reserved_segments(sbi), in f2fs_gc()
1879 prefree_segments(sbi)); in f2fs_gc()
1881 cpc.reason = __get_cp_reason(sbi); in f2fs_gc()
1883 sbi->skipped_gc_rwsem = 0; in f2fs_gc()
1884 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) { in f2fs_gc()
1888 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_gc()
1894 if (has_not_enough_free_secs(sbi, 0, 0)) { in f2fs_gc()
1902 if (prefree_segments(sbi)) { in f2fs_gc()
1903 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_gc()
1904 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_gc()
1918 ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time); in f2fs_gc()
1922 f2fs_pinned_section_exists(DIRTY_I(sbi))) { in f2fs_gc()
1923 f2fs_unpin_all_sections(sbi, false); in f2fs_gc()
1929 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, in f2fs_gc()
1937 if (seg_freed == f2fs_usable_segs_in_sec(sbi)) { in f2fs_gc()
1946 sbi->cur_victim_sec = NULL_SEGNO; in f2fs_gc()
1948 if (has_enough_free_secs(sbi, sec_freed, 0)) { in f2fs_gc()
1954 if (sbi->skipped_gc_rwsem) in f2fs_gc()
1959 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_gc()
1960 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_gc()
1963 } else if (has_enough_free_secs(sbi, 0, 0)) { in f2fs_gc()
1967 __get_secs_required(sbi, NULL, &upper_secs, NULL); in f2fs_gc()
1973 if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS && in f2fs_gc()
1974 prefree_segments(sbi)) { in f2fs_gc()
1975 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_gc()
1976 ret = f2fs_write_checkpoint(sbi, &cpc); in f2fs_gc()
1987 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0; in f2fs_gc()
1988 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno; in f2fs_gc()
1991 f2fs_unpin_all_sections(sbi, true); in f2fs_gc()
1993 trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed, in f2fs_gc()
1994 get_pages(sbi, F2FS_DIRTY_NODES), in f2fs_gc()
1995 get_pages(sbi, F2FS_DIRTY_DENTS), in f2fs_gc()
1996 get_pages(sbi, F2FS_DIRTY_IMETA), in f2fs_gc()
1997 free_sections(sbi), in f2fs_gc()
1998 free_segments(sbi), in f2fs_gc()
1999 reserved_segments(sbi), in f2fs_gc()
2000 prefree_segments(sbi)); in f2fs_gc()
2002 f2fs_up_write(&sbi->gc_lock); in f2fs_gc()
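f2fs_gc() wraps everything in a retry loop (the "gc_more" label in the source): pick a victim, collect it, stop once has_enough_free_secs() is satisfied, and otherwise write a checkpoint to turn prefree segments back into free ones before trying again. Reduced to control flow, with hypothetical stubs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Control-flow skeleton of f2fs_gc(); every helper is a stub. */
    static bool get_victim(unsigned int *segno)
    {
        static int candidates = 3;          /* pretend three dirty sections */
        *segno = 100 + candidates;
        return candidates-- > 0;            /* no victim left -> stop */
    }
    static bool collect(unsigned int segno) { (void)segno; return true; }
    static bool enough_free_secs(int freed) { return freed >= 2; }
    static void write_checkpoint(void)      { /* reclaim prefree segments */ }

    int main(void)
    {
        int sec_freed = 0;
        unsigned int segno;

        while (get_victim(&segno)) {        /* __get_victim() */
            if (collect(segno))             /* do_garbage_collect() */
                sec_freed++;
            if (enough_free_secs(sec_freed))
                break;                      /* FG GC met its target */
            write_checkpoint();             /* then loop back to gc_more */
        }
        printf("sections freed: %d\n", sec_freed);
        return 0;
    }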
2023 static void init_atgc_management(struct f2fs_sb_info *sbi) in init_atgc_management() argument
2025 struct atgc_management *am = &sbi->am; in init_atgc_management()
2027 if (test_opt(sbi, ATGC) && in init_atgc_management()
2028 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD) in init_atgc_management()
2041 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) in f2fs_build_gc_manager() argument
2043 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; in f2fs_build_gc_manager()
2046 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi)) in f2fs_build_gc_manager()
2047 SIT_I(sbi)->last_victim[ALLOC_NEXT] = in f2fs_build_gc_manager()
2048 GET_SEGNO(sbi, FDEV(0).end_blk) + 1; in f2fs_build_gc_manager()
2050 init_atgc_management(sbi); in f2fs_build_gc_manager()
2053 int f2fs_gc_range(struct f2fs_sb_info *sbi, in f2fs_gc_range() argument
2060 if (unlikely(f2fs_cp_error(sbi))) in f2fs_gc_range()
2063 for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) { in f2fs_gc_range()
2069 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false); in f2fs_gc_range()
2072 if (!dry_run && get_valid_blocks(sbi, segno, true)) in f2fs_gc_range()
2075 !get_valid_blocks(sbi, segno, true) && --gc_secs == 0) in f2fs_gc_range()
2085 static int free_segment_range(struct f2fs_sb_info *sbi, in free_segment_range() argument
2095 MAIN_SECS(sbi) -= secs; in free_segment_range()
2096 start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi); in free_segment_range()
2097 end = MAIN_SEGS(sbi) - 1; in free_segment_range()
2099 mutex_lock(&DIRTY_I(sbi)->seglist_lock); in free_segment_range()
2101 if (SIT_I(sbi)->last_victim[gc_mode] >= start) in free_segment_range()
2102 SIT_I(sbi)->last_victim[gc_mode] = 0; in free_segment_range()
2105 if (sbi->next_victim_seg[gc_type] >= start) in free_segment_range()
2106 sbi->next_victim_seg[gc_type] = NULL_SEGNO; in free_segment_range()
2107 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); in free_segment_range()
2111 err = f2fs_allocate_segment_for_resize(sbi, type, start, end); in free_segment_range()
2117 err = f2fs_gc_range(sbi, start, end, dry_run, 0); in free_segment_range()
2121 stat_inc_cp_call_count(sbi, TOTAL_CALL); in free_segment_range()
2122 err = f2fs_write_checkpoint(sbi, &cpc); in free_segment_range()
2126 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); in free_segment_range()
2128 f2fs_err(sbi, "segno %u should be free but still inuse!", in free_segment_range()
2130 f2fs_bug_on(sbi, 1); in free_segment_range()
2133 MAIN_SECS(sbi) += secs; in free_segment_range()
2137 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) in update_sb_metadata() argument
2139 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); in update_sb_metadata()
2144 int segs = secs * SEGS_PER_SEC(sbi); in update_sb_metadata()
2146 f2fs_down_write(&sbi->sb_lock); in update_sb_metadata()
2157 (long long)SEGS_TO_BLKS(sbi, segs)); in update_sb_metadata()
2158 if (f2fs_is_multi_device(sbi)) { in update_sb_metadata()
2159 int last_dev = sbi->s_ndevs - 1; in update_sb_metadata()
2167 f2fs_up_write(&sbi->sb_lock); in update_sb_metadata()
2170 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) in update_fs_metadata() argument
2172 int segs = secs * SEGS_PER_SEC(sbi); in update_fs_metadata()
2173 long long blks = SEGS_TO_BLKS(sbi, segs); in update_fs_metadata()
2175 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count); in update_fs_metadata()
2177 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; in update_fs_metadata()
2178 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs; in update_fs_metadata()
2179 MAIN_SECS(sbi) += secs; in update_fs_metadata()
2180 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; in update_fs_metadata()
2181 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; in update_fs_metadata()
2182 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks); in update_fs_metadata()
2184 if (f2fs_is_multi_device(sbi)) { in update_fs_metadata()
2185 int last_dev = sbi->s_ndevs - 1; in update_fs_metadata()
2193 div_u64(blks, sbi->blocks_per_blkz); in update_fs_metadata()
2200 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); in f2fs_resize_fs() local
2207 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count); in f2fs_resize_fs()
2211 if (f2fs_is_multi_device(sbi)) { in f2fs_resize_fs()
2212 int last_dev = sbi->s_ndevs - 1; in f2fs_resize_fs()
2215 if (block_count + SEGS_TO_BLKS(sbi, last_segs) <= in f2fs_resize_fs()
2221 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem); in f2fs_resize_fs()
2228 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { in f2fs_resize_fs()
2229 f2fs_err(sbi, "Should run fsck to repair first."); in f2fs_resize_fs()
2233 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_resize_fs()
2234 f2fs_err(sbi, "Checkpoint should be enabled."); in f2fs_resize_fs()
2243 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); in f2fs_resize_fs()
2246 if (!f2fs_down_write_trylock(&sbi->gc_lock)) { in f2fs_resize_fs()
2252 f2fs_lock_op(sbi); in f2fs_resize_fs()
2254 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2255 if (shrunk_blocks + valid_user_blocks(sbi) + in f2fs_resize_fs()
2256 sbi->current_reserved_blocks + sbi->unusable_block_count + in f2fs_resize_fs()
2257 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) in f2fs_resize_fs()
2259 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2264 err = free_segment_range(sbi, secs, true); in f2fs_resize_fs()
2267 f2fs_unlock_op(sbi); in f2fs_resize_fs()
2268 f2fs_up_write(&sbi->gc_lock); in f2fs_resize_fs()
2274 err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE); in f2fs_resize_fs()
2278 if (f2fs_readonly(sbi->sb)) { in f2fs_resize_fs()
2279 err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE); in f2fs_resize_fs()
2285 f2fs_down_write(&sbi->gc_lock); in f2fs_resize_fs()
2286 f2fs_down_write(&sbi->cp_global_sem); in f2fs_resize_fs()
2288 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2289 if (shrunk_blocks + valid_user_blocks(sbi) + in f2fs_resize_fs()
2290 sbi->current_reserved_blocks + sbi->unusable_block_count + in f2fs_resize_fs()
2291 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) in f2fs_resize_fs()
2294 sbi->user_block_count -= shrunk_blocks; in f2fs_resize_fs()
2295 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2299 set_sbi_flag(sbi, SBI_IS_RESIZEFS); in f2fs_resize_fs()
2300 err = free_segment_range(sbi, secs, false); in f2fs_resize_fs()
2304 update_sb_metadata(sbi, -secs); in f2fs_resize_fs()
2306 err = f2fs_commit_super(sbi, false); in f2fs_resize_fs()
2308 update_sb_metadata(sbi, secs); in f2fs_resize_fs()
2312 update_fs_metadata(sbi, -secs); in f2fs_resize_fs()
2313 clear_sbi_flag(sbi, SBI_IS_RESIZEFS); in f2fs_resize_fs()
2314 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_resize_fs()
2316 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_resize_fs()
2317 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_resize_fs()
2319 update_fs_metadata(sbi, secs); in f2fs_resize_fs()
2320 update_sb_metadata(sbi, secs); in f2fs_resize_fs()
2321 f2fs_commit_super(sbi, false); in f2fs_resize_fs()
2324 clear_sbi_flag(sbi, SBI_IS_RESIZEFS); in f2fs_resize_fs()
2326 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_resize_fs()
2327 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); in f2fs_resize_fs()
2329 spin_lock(&sbi->stat_lock); in f2fs_resize_fs()
2330 sbi->user_block_count += shrunk_blocks; in f2fs_resize_fs()
2331 spin_unlock(&sbi->stat_lock); in f2fs_resize_fs()
2334 f2fs_up_write(&sbi->cp_global_sem); in f2fs_resize_fs()
2335 f2fs_up_write(&sbi->gc_lock); in f2fs_resize_fs()
2336 thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE); in f2fs_resize_fs()
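Taken together, the f2fs_resize_fs() matches describe the shrink protocol: validate alignment (BLKS_PER_SEC) and the space budget, dry-run free_segment_range() under lock_op, freeze the superblock, re-check the budget under stat_lock, migrate the tail sections for real, shrink the on-disk super block and then the in-core metadata, and commit with a checkpoint; on failure the kernel unwinds the earlier steps and sets SBI_NEED_FSCK (lines 2326-2327). A goto-chain sketch of that unwind shape; every helper name below is illustrative, not the kernel's:

    /* Unwind shape of f2fs_resize_fs()'s shrink path; helpers are stubs,
     * and each step's undo is applied in reverse when a later step fails. */
    static int  check_alignment_and_space(void)  { return 0; }
    static int  free_segment_range_dry_run(void) { return 0; }
    static int  freeze_fs(void)                  { return 0; }
    static void thaw_fs(void)                    { }
    static int  migrate_tail_sections(void)      { return 0; }
    static void shrink_sb_metadata(void)         { } /* update_sb_metadata(-secs) */
    static void grow_sb_metadata(void)           { } /* update_sb_metadata(secs)  */
    static int  commit_super(void)               { return 0; }
    static void shrink_fs_metadata(void)         { } /* update_fs_metadata(-secs) */
    static void grow_fs_metadata(void)           { } /* update_fs_metadata(secs)  */
    static int  write_checkpoint(void)           { return 0; }

    static int shrink_fs_model(void)
    {
        int err;

        if ((err = check_alignment_and_space()))
            return err;
        if ((err = free_segment_range_dry_run()))
            return err;                        /* nothing real changed yet */
        if ((err = freeze_fs()))
            return err;
        if ((err = migrate_tail_sections()))   /* free_segment_range(), live */
            goto out_thaw;
        shrink_sb_metadata();
        if ((err = commit_super()))
            goto restore_sb;
        shrink_fs_metadata();
        if ((err = write_checkpoint()))
            goto restore_fs;                   /* kernel also sets SBI_NEED_FSCK */
        thaw_fs();
        return 0;

    restore_fs:
        grow_fs_metadata();
    restore_sb:
        grow_sb_metadata();
    out_thaw:
        thaw_fs();
        return err;
    }

    int main(void)
    {
        return shrink_fs_model();
    }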