Lines matching "sbi" (full identifier references) in fs/f2fs/segment.c; each entry gives the source line number, the matching code, and the enclosing function.
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi) in f2fs_need_SSR() argument
173 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); in f2fs_need_SSR()
174 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); in f2fs_need_SSR()
175 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA); in f2fs_need_SSR()
177 if (f2fs_lfs_mode(sbi)) in f2fs_need_SSR()
179 if (sbi->gc_mode == GC_URGENT_HIGH) in f2fs_need_SSR()
181 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in f2fs_need_SSR()
184 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs + in f2fs_need_SSR()
185 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi)); in f2fs_need_SSR()
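The f2fs_need_SSR() hits above reduce to one heuristic: after the early exits (LFS mode disables SSR, urgent-high GC and a disabled checkpoint force it), SSR is wanted whenever the free sections cannot cover the dirty node, dentry, and inode-metadata sections plus a configured reserve. A minimal userspace sketch of just that arithmetic; the struct and sample numbers are illustrative stand-ins, not the kernel types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the sbi-derived quantities; illustration only. */
    struct ssr_inputs {
            int free_secs;      /* free_sections(sbi) */
            int node_secs;      /* sections of dirty node pages */
            int dent_secs;      /* sections of dirty dentry pages */
            int imeta_secs;     /* sections of dirty inode metadata */
            int min_ssr_secs;   /* SM_I(sbi)->min_ssr_sections */
            int reserved_secs;  /* reserved_sections(sbi) */
    };

    static bool need_ssr(const struct ssr_inputs *in)
    {
            /* dentry sections are weighted twice, as in the kernel expression */
            return in->free_secs <= in->node_secs + 2 * in->dent_secs +
                   in->imeta_secs + in->min_ssr_secs + in->reserved_secs;
    }

    int main(void)
    {
            struct ssr_inputs in = { 8, 2, 1, 1, 2, 3 };

            printf("need SSR: %s\n", need_ssr(&in) ? "yes" : "no");
            return 0;
    }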
227 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __replace_atomic_write_block() local
243 err = f2fs_get_node_info(sbi, dn.nid, &ni, false); in __replace_atomic_write_block()
253 dec_valid_block_count(sbi, inode, 1); in __replace_atomic_write_block()
254 f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1); in __replace_atomic_write_block()
257 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, in __replace_atomic_write_block()
263 err = inc_valid_block_count(sbi, inode, &count, true); in __replace_atomic_write_block()
271 dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count); in __replace_atomic_write_block()
273 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr, in __replace_atomic_write_block()
310 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __f2fs_commit_atomic_write() local
345 } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr, in __f2fs_commit_atomic_write()
375 sbi->revoked_atomic_block += fi->atomic_write_cnt; in __f2fs_commit_atomic_write()
377 sbi->committed_atomic_block += fi->atomic_write_cnt; in __f2fs_commit_atomic_write()
392 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_commit_atomic_write() local
401 f2fs_lock_op(sbi); in f2fs_commit_atomic_write()
405 f2fs_unlock_op(sbi); in f2fs_commit_atomic_write()
415 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) in f2fs_balance_fs() argument
417 if (f2fs_cp_error(sbi)) in f2fs_balance_fs()
420 if (time_to_inject(sbi, FAULT_CHECKPOINT)) in f2fs_balance_fs()
421 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT); in f2fs_balance_fs()
424 if (need && excess_cached_nats(sbi)) in f2fs_balance_fs()
425 f2fs_balance_fs_bg(sbi, false); in f2fs_balance_fs()
427 if (!f2fs_is_checkpoint_ready(sbi)) in f2fs_balance_fs()
434 if (has_enough_free_secs(sbi, 0, 0)) in f2fs_balance_fs()
437 if (test_opt(sbi, GC_MERGE) && sbi->gc_thread && in f2fs_balance_fs()
438 sbi->gc_thread->f2fs_gc_task) { in f2fs_balance_fs()
441 prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait, in f2fs_balance_fs()
443 wake_up(&sbi->gc_thread->gc_wait_queue_head); in f2fs_balance_fs()
445 finish_wait(&sbi->gc_thread->fggc_wq, &wait); in f2fs_balance_fs()
454 f2fs_down_write(&sbi->gc_lock); in f2fs_balance_fs()
455 stat_inc_gc_call_count(sbi, FOREGROUND); in f2fs_balance_fs()
456 f2fs_gc(sbi, &gc_control); in f2fs_balance_fs()
460 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi) in excess_dirty_threshold() argument
462 int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2; in excess_dirty_threshold()
463 unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS); in excess_dirty_threshold()
464 unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA); in excess_dirty_threshold()
465 unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES); in excess_dirty_threshold()
466 unsigned int meta = get_pages(sbi, F2FS_DIRTY_META); in excess_dirty_threshold()
467 unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA); in excess_dirty_threshold()
469 SEGS_TO_BLKS(sbi, (factor * DEFAULT_DIRTY_THRESHOLD)); in excess_dirty_threshold()
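excess_dirty_threshold() loads five dirty-page counters and scales the threshold by a factor of 3 while cp_rwsem is held (a checkpoint is in flight) versus 2 otherwise. A sketch of that comparison, assuming from context that each class is checked independently against the same block-count threshold:

    #include <stdbool.h>

    /* Pick the stricter factor while a checkpoint holds cp_rwsem. */
    static bool excess_dirty(bool cp_locked, const unsigned int counts[5],
                             unsigned int blks_per_seg,
                             unsigned int dirty_threshold_segs)
    {
            int factor = cp_locked ? 3 : 2;
            unsigned int threshold = factor * dirty_threshold_segs * blks_per_seg;

            /* counts[]: dents, qdata, nodes, meta, imeta */
            for (int i = 0; i < 5; i++)
                    if (counts[i] >= threshold)
                            return true;
            return false;
    }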
479 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg) in f2fs_balance_fs_bg() argument
481 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_balance_fs_bg()
485 if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE)) in f2fs_balance_fs_bg()
486 f2fs_shrink_read_extent_tree(sbi, in f2fs_balance_fs_bg()
490 if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE)) in f2fs_balance_fs_bg()
491 f2fs_shrink_age_extent_tree(sbi, in f2fs_balance_fs_bg()
495 if (!f2fs_available_free_memory(sbi, NAT_ENTRIES)) in f2fs_balance_fs_bg()
496 f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK); in f2fs_balance_fs_bg()
498 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) in f2fs_balance_fs_bg()
499 f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS); in f2fs_balance_fs_bg()
501 f2fs_build_free_nids(sbi, false, false); in f2fs_balance_fs_bg()
503 if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) || in f2fs_balance_fs_bg()
504 excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi)) in f2fs_balance_fs_bg()
508 if (is_inflight_io(sbi, REQ_TIME) || in f2fs_balance_fs_bg()
509 (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem))) in f2fs_balance_fs_bg()
513 if (f2fs_time_over(sbi, CP_TIME)) in f2fs_balance_fs_bg()
517 if (f2fs_available_free_memory(sbi, NAT_ENTRIES) && in f2fs_balance_fs_bg()
518 f2fs_available_free_memory(sbi, INO_ENTRIES)) in f2fs_balance_fs_bg()
522 if (test_opt(sbi, DATA_FLUSH) && from_bg) { in f2fs_balance_fs_bg()
525 mutex_lock(&sbi->flush_lock); in f2fs_balance_fs_bg()
528 f2fs_sync_dirty_inodes(sbi, FILE_INODE, false); in f2fs_balance_fs_bg()
531 mutex_unlock(&sbi->flush_lock); in f2fs_balance_fs_bg()
533 stat_inc_cp_call_count(sbi, BACKGROUND); in f2fs_balance_fs_bg()
534 f2fs_sync_fs(sbi->sb, 1); in f2fs_balance_fs_bg()
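Read in order, the f2fs_balance_fs_bg() lines form a decision cascade: bail out during recovery, shrink the extent/NAT/nid caches, then either checkpoint via f2fs_sync_fs() or return, depending on dirty-metadata pressure, in-flight I/O, and checkpoint timing. A compressed sketch of that ordering with the kernel predicates flattened to booleans; names and groupings are illustrative:

    #include <stdbool.h>

    enum bg_action { BG_NONE, BG_SYNC_FS };

    /* Ordering only; each predicate flattens several kernel checks. */
    static enum bg_action balance_bg(bool por_doing, bool excess_dirty_meta,
                                     bool io_inflight, bool cp_time_over,
                                     bool mem_ok)
    {
            if (por_doing)
                    return BG_NONE;         /* recovery in progress: hands off */
            if (excess_dirty_meta)
                    goto do_sync;           /* dirty NATs/pages/prefree piled up */
            if (io_inflight)
                    return BG_NONE;         /* don't compete with foreground I/O */
            if (cp_time_over)
                    goto do_sync;           /* periodic checkpoint is due */
            if (mem_ok)
                    return BG_NONE;         /* NAT/INO caches still fit in memory */
    do_sync:
            return BG_SYNC_FS;              /* ends in f2fs_sync_fs(sbi->sb, 1) */
    }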
537 static int __submit_flush_wait(struct f2fs_sb_info *sbi, in __submit_flush_wait() argument
542 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER), in __submit_flush_wait()
543 test_opt(sbi, FLUSH_MERGE), ret); in __submit_flush_wait()
545 f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0); in __submit_flush_wait()
549 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino) in submit_flush_wait() argument
554 if (!f2fs_is_multi_device(sbi)) in submit_flush_wait()
555 return __submit_flush_wait(sbi, sbi->sb->s_bdev); in submit_flush_wait()
557 for (i = 0; i < sbi->s_ndevs; i++) { in submit_flush_wait()
558 if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO)) in submit_flush_wait()
560 ret = __submit_flush_wait(sbi, FDEV(i).bdev); in submit_flush_wait()
569 struct f2fs_sb_info *sbi = data; in issue_flush_thread() local
570 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; in issue_flush_thread()
585 ret = submit_flush_wait(sbi, cmd->ino); in issue_flush_thread()
601 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino) in f2fs_issue_flush() argument
603 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; in f2fs_issue_flush()
607 if (test_opt(sbi, NOBARRIER)) in f2fs_issue_flush()
610 if (!test_opt(sbi, FLUSH_MERGE)) { in f2fs_issue_flush()
612 ret = submit_flush_wait(sbi, ino); in f2fs_issue_flush()
619 f2fs_is_multi_device(sbi)) { in f2fs_issue_flush()
620 ret = submit_flush_wait(sbi, ino); in f2fs_issue_flush()
655 ret = submit_flush_wait(sbi, ino); in f2fs_issue_flush()
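f2fs_issue_flush() either flushes synchronously (no FLUSH_MERGE, or a multi-device setup) or queues the request for issue_flush_thread, which merges concurrent flushes into one. The per-device loop of submit_flush_wait() can be modeled in userspace with fsync(2); the dirty[] array is a stand-in for f2fs_is_dirty_device():

    #include <errno.h>
    #include <stdbool.h>
    #include <unistd.h>

    /* Flush every device that is dirty for this inode; keep the first error. */
    static int submit_flush_wait_model(const int *fds, const bool *dirty, int ndevs)
    {
            int ret = 0;

            for (int i = 0; i < ndevs; i++) {
                    if (!dirty[i])
                            continue;       /* stand-in for f2fs_is_dirty_device() */
                    if (fsync(fds[i]) < 0 && !ret)
                            ret = -errno;   /* remember the first failure */
            }
            return ret;
    }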
672 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi) in f2fs_create_flush_cmd_control() argument
674 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_create_flush_cmd_control()
677 if (SM_I(sbi)->fcc_info) { in f2fs_create_flush_cmd_control()
678 fcc = SM_I(sbi)->fcc_info; in f2fs_create_flush_cmd_control()
684 fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL); in f2fs_create_flush_cmd_control()
691 SM_I(sbi)->fcc_info = fcc; in f2fs_create_flush_cmd_control()
692 if (!test_opt(sbi, FLUSH_MERGE)) in f2fs_create_flush_cmd_control()
696 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, in f2fs_create_flush_cmd_control()
708 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) in f2fs_destroy_flush_cmd_control() argument
710 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; in f2fs_destroy_flush_cmd_control()
720 SM_I(sbi)->fcc_info = NULL; in f2fs_destroy_flush_cmd_control()
724 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi) in f2fs_flush_device_cache() argument
728 if (!f2fs_is_multi_device(sbi)) in f2fs_flush_device_cache()
731 if (test_opt(sbi, NOBARRIER)) in f2fs_flush_device_cache()
734 for (i = 1; i < sbi->s_ndevs; i++) { in f2fs_flush_device_cache()
737 if (!f2fs_test_bit(i, (char *)&sbi->dirty_device)) in f2fs_flush_device_cache()
741 ret = __submit_flush_wait(sbi, FDEV(i).bdev); in f2fs_flush_device_cache()
747 f2fs_stop_checkpoint(sbi, false, in f2fs_flush_device_cache()
752 spin_lock(&sbi->dev_lock); in f2fs_flush_device_cache()
753 f2fs_clear_bit(i, (char *)&sbi->dirty_device); in f2fs_flush_device_cache()
754 spin_unlock(&sbi->dev_lock); in f2fs_flush_device_cache()
760 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, in __locate_dirty_segment() argument
763 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in __locate_dirty_segment()
766 if (IS_CURSEG(sbi, segno)) in __locate_dirty_segment()
773 struct seg_entry *sentry = get_seg_entry(sbi, segno); in __locate_dirty_segment()
777 f2fs_bug_on(sbi, 1); in __locate_dirty_segment()
783 if (__is_large_section(sbi)) { in __locate_dirty_segment()
784 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in __locate_dirty_segment()
786 get_valid_blocks(sbi, segno, true); in __locate_dirty_segment()
788 f2fs_bug_on(sbi, in __locate_dirty_segment()
789 (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) && in __locate_dirty_segment()
791 valid_blocks == CAP_BLKS_PER_SEC(sbi)); in __locate_dirty_segment()
793 if (!IS_CURSEC(sbi, secno)) in __locate_dirty_segment()
799 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, in __remove_dirty_segment() argument
802 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in __remove_dirty_segment()
809 struct seg_entry *sentry = get_seg_entry(sbi, segno); in __remove_dirty_segment()
815 valid_blocks = get_valid_blocks(sbi, segno, true); in __remove_dirty_segment()
817 clear_bit(GET_SEC_FROM_SEG(sbi, segno), in __remove_dirty_segment()
820 clear_bit(segno, SIT_I(sbi)->invalid_segmap); in __remove_dirty_segment()
823 if (__is_large_section(sbi)) { in __remove_dirty_segment()
824 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in __remove_dirty_segment()
827 valid_blocks == CAP_BLKS_PER_SEC(sbi)) { in __remove_dirty_segment()
832 if (!IS_CURSEC(sbi, secno)) in __remove_dirty_segment()
843 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) in locate_dirty_segment() argument
845 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in locate_dirty_segment()
849 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) in locate_dirty_segment()
852 usable_blocks = f2fs_usable_blks_in_seg(sbi, segno); in locate_dirty_segment()
855 valid_blocks = get_valid_blocks(sbi, segno, false); in locate_dirty_segment()
856 ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false); in locate_dirty_segment()
858 if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) || in locate_dirty_segment()
860 __locate_dirty_segment(sbi, segno, PRE); in locate_dirty_segment()
861 __remove_dirty_segment(sbi, segno, DIRTY); in locate_dirty_segment()
863 __locate_dirty_segment(sbi, segno, DIRTY); in locate_dirty_segment()
866 __remove_dirty_segment(sbi, segno, DIRTY); in locate_dirty_segment()
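locate_dirty_segment() moves a segment between the PRE and DIRTY lists based on its valid-block count: fully invalidated segments become prefree, partially valid ones stay dirty. A sketch of the classification; the kernel additionally consults checkpointed valid blocks and the SBI_CP_DISABLED flag, which this drops:

    /* Fully invalid -> PRE (freeable after checkpoint); partial -> DIRTY. */
    enum seg_list { SEG_NONE, SEG_DIRTY, SEG_PRE };

    static enum seg_list classify_segment(unsigned int valid_blocks,
                                          unsigned int usable_blocks)
    {
            if (valid_blocks == 0)
                    return SEG_PRE;
            if (valid_blocks < usable_blocks)
                    return SEG_DIRTY;
            return SEG_NONE;        /* fully valid: on neither list */
    }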
873 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi) in f2fs_dirty_to_prefree() argument
875 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in f2fs_dirty_to_prefree()
879 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { in f2fs_dirty_to_prefree()
880 if (get_valid_blocks(sbi, segno, false)) in f2fs_dirty_to_prefree()
882 if (IS_CURSEG(sbi, segno)) in f2fs_dirty_to_prefree()
884 __locate_dirty_segment(sbi, segno, PRE); in f2fs_dirty_to_prefree()
885 __remove_dirty_segment(sbi, segno, DIRTY); in f2fs_dirty_to_prefree()
890 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi) in f2fs_get_unusable_blocks() argument
893 (overprovision_segments(sbi) - reserved_segments(sbi)); in f2fs_get_unusable_blocks()
894 block_t ovp_holes = SEGS_TO_BLKS(sbi, ovp_hole_segs); in f2fs_get_unusable_blocks()
895 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in f2fs_get_unusable_blocks()
902 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { in f2fs_get_unusable_blocks()
903 se = get_seg_entry(sbi, segno); in f2fs_get_unusable_blocks()
905 holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) - in f2fs_get_unusable_blocks()
908 holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) - in f2fs_get_unusable_blocks()
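f2fs_get_unusable_blocks() walks the dirty segments and sums the "holes" (usable minus valid blocks) separately for data and node segments; with checkpointing disabled those holes cannot be reclaimed, so whatever exceeds the overprovision area counts as unusable. A sketch of that accounting; the final comparison against ovp_holes is inferred from context:

    /* Sum per-type "holes" (usable - valid) over dirty segments. */
    struct seg_model { int is_node; unsigned int valid; unsigned int usable; };

    static unsigned int unusable_blocks(const struct seg_model *segs, int n,
                                        unsigned int ovp_holes)
    {
            unsigned int holes[2] = { 0, 0 };       /* [0] = data, [1] = node */
            unsigned int worst;

            for (int i = 0; i < n; i++)
                    holes[segs[i].is_node] += segs[i].usable - segs[i].valid;

            worst = holes[0] > holes[1] ? holes[0] : holes[1];
            return worst > ovp_holes ? worst - ovp_holes : 0;
    }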
919 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable) in f2fs_disable_cp_again() argument
922 (overprovision_segments(sbi) - reserved_segments(sbi)); in f2fs_disable_cp_again()
924 if (F2FS_OPTION(sbi).unusable_cap_perc == 100) in f2fs_disable_cp_again()
926 if (unusable > F2FS_OPTION(sbi).unusable_cap) in f2fs_disable_cp_again()
928 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) && in f2fs_disable_cp_again()
929 dirty_segments(sbi) > ovp_hole_segs) in f2fs_disable_cp_again()
931 if (has_not_enough_free_secs(sbi, 0, 0)) in f2fs_disable_cp_again()
937 static unsigned int get_free_segment(struct f2fs_sb_info *sbi) in get_free_segment() argument
939 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in get_free_segment()
943 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) { in get_free_segment()
944 if (get_valid_blocks(sbi, segno, false)) in get_free_segment()
946 if (get_ckpt_valid_blocks(sbi, segno, false)) in get_free_segment()
955 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, in __create_discard_cmd() argument
959 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __create_discard_cmd()
963 f2fs_bug_on(sbi, !len); in __create_discard_cmd()
987 static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi) in f2fs_check_discard_tree() argument
990 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_check_discard_tree()
1003 f2fs_info(sbi, "broken discard_rbtree, " in f2fs_check_discard_tree()
1015 static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi, in __lookup_discard_cmd() argument
1018 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __lookup_discard_cmd()
1107 static void __remove_discard_cmd(struct f2fs_sb_info *sbi, in __remove_discard_cmd() argument
1110 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __remove_discard_cmd()
1122 f2fs_bug_on(sbi, dc->ref); in __remove_discard_cmd()
1128 f2fs_info_ratelimited(sbi, in __remove_discard_cmd()
1151 static void __check_sit_bitmap(struct f2fs_sb_info *sbi, in __check_sit_bitmap() argument
1161 segno = GET_SEGNO(sbi, blk); in __check_sit_bitmap()
1162 sentry = get_seg_entry(sbi, segno); in __check_sit_bitmap()
1163 offset = GET_BLKOFF_FROM_SEG0(sbi, blk); in __check_sit_bitmap()
1165 if (end < START_BLOCK(sbi, segno + 1)) in __check_sit_bitmap()
1166 size = GET_BLKOFF_FROM_SEG0(sbi, end); in __check_sit_bitmap()
1168 size = BLKS_PER_SEG(sbi); in __check_sit_bitmap()
1171 f2fs_bug_on(sbi, offset != size); in __check_sit_bitmap()
1172 blk = START_BLOCK(sbi, segno + 1); in __check_sit_bitmap()
1177 static void __init_discard_policy(struct f2fs_sb_info *sbi, in __init_discard_policy() argument
1181 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __init_discard_policy()
1203 if (utilization(sbi) > dcc->discard_urgent_util) { in __init_discard_policy()
1224 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1229 static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi, in __submit_zone_reset_cmd() argument
1234 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __submit_zone_reset_cmd()
1254 __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len); in __submit_zone_reset_cmd()
1262 f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE); in __submit_zone_reset_cmd()
1267 static int __submit_discard_cmd(struct f2fs_sb_info *sbi, in __submit_discard_cmd() argument
1274 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __submit_discard_cmd()
1284 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) in __submit_discard_cmd()
1288 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) { in __submit_discard_cmd()
1289 int devi = f2fs_bdev_index(sbi, bdev); in __submit_discard_cmd()
1294 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) { in __submit_discard_cmd()
1295 __submit_zone_reset_cmd(sbi, dc, flag, in __submit_discard_cmd()
1336 if (time_to_inject(sbi, FAULT_DISCARD)) { in __submit_discard_cmd()
1353 f2fs_bug_on(sbi, !bio); in __submit_discard_cmd()
1372 __check_sit_bitmap(sbi, lstart, lstart + len); in __submit_discard_cmd()
1381 f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE); in __submit_discard_cmd()
1391 __update_discard_tree_range(sbi, bdev, lstart, start, len); in __submit_discard_cmd()
1396 static void __insert_discard_cmd(struct f2fs_sb_info *sbi, in __insert_discard_cmd() argument
1400 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __insert_discard_cmd()
1422 dc = __create_discard_cmd(sbi, bdev, lstart, start, len); in __insert_discard_cmd()
1434 static void __punch_discard_cmd(struct f2fs_sb_info *sbi, in __punch_discard_cmd() argument
1437 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __punch_discard_cmd()
1442 __remove_discard_cmd(sbi, dc); in __punch_discard_cmd()
1457 __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1, in __punch_discard_cmd()
1470 static void __update_discard_tree_range(struct f2fs_sb_info *sbi, in __update_discard_tree_range() argument
1474 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __update_discard_tree_range()
1539 __remove_discard_cmd(sbi, tdc); in __update_discard_tree_range()
1544 __insert_discard_cmd(sbi, bdev, in __update_discard_tree_range()
1557 static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi, in __queue_zone_reset_cmd() argument
1563 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock); in __queue_zone_reset_cmd()
1564 __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen); in __queue_zone_reset_cmd()
1565 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock); in __queue_zone_reset_cmd()
1569 static void __queue_discard_cmd(struct f2fs_sb_info *sbi, in __queue_discard_cmd() argument
1579 if (f2fs_is_multi_device(sbi)) { in __queue_discard_cmd()
1580 int devi = f2fs_target_device_index(sbi, blkstart); in __queue_discard_cmd()
1584 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock); in __queue_discard_cmd()
1585 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen); in __queue_discard_cmd()
1586 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock); in __queue_discard_cmd()
1589 static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi, in __issue_discard_cmd_orderly() argument
1592 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __issue_discard_cmd_orderly()
1614 if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) { in __issue_discard_cmd_orderly()
1620 err = __submit_discard_cmd(sbi, dpolicy, dc, issued); in __issue_discard_cmd_orderly()
1627 __remove_discard_cmd(sbi, dc); in __issue_discard_cmd_orderly()
1641 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1644 static int __issue_discard_cmd(struct f2fs_sb_info *sbi, in __issue_discard_cmd() argument
1647 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __issue_discard_cmd()
1655 f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT); in __issue_discard_cmd()
1661 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) in __issue_discard_cmd()
1668 __issue_discard_cmd_orderly(sbi, dpolicy, &issued); in __issue_discard_cmd()
1678 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); in __issue_discard_cmd()
1681 f2fs_bug_on(sbi, dc->state != D_PREP); in __issue_discard_cmd()
1684 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT)) in __issue_discard_cmd()
1688 !is_idle(sbi, DISCARD_TIME)) { in __issue_discard_cmd()
1693 __submit_discard_cmd(sbi, dpolicy, dc, &issued); in __issue_discard_cmd()
1707 __wait_all_discard_cmd(sbi, dpolicy); in __issue_discard_cmd()
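__issue_discard_cmd() drains the pending lists from the largest discard granularity down to the policy's minimum, respecting a per-round request budget and backing off when the device is not idle (the io_aware policy). A toy model of that issuing order, with buckets of counters standing in for the pend_list entries:

    #include <stdbool.h>

    /* Pending discards bucketed by granularity; drain from the largest down. */
    static int issue_round(int pending[], int nr_buckets, int min_bucket,
                           int budget, bool io_idle)
    {
            int issued = 0;

            for (int i = nr_buckets - 1; i >= min_bucket; i--) {
                    while (pending[i] > 0 && issued < budget) {
                            if (!io_idle)
                                    return issued;  /* yield to foreground I/O */
                            pending[i]--;           /* "submit" one queued command */
                            issued++;
                    }
            }
            return issued;
    }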
1717 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi) in __drop_discard_cmd() argument
1719 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __drop_discard_cmd()
1729 f2fs_bug_on(sbi, dc->state != D_PREP); in __drop_discard_cmd()
1730 __remove_discard_cmd(sbi, dc); in __drop_discard_cmd()
1739 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi) in f2fs_drop_discard_cmd() argument
1741 __drop_discard_cmd(sbi); in f2fs_drop_discard_cmd()
1744 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi, in __wait_one_discard_bio() argument
1747 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __wait_one_discard_bio()
1752 f2fs_bug_on(sbi, dc->state != D_DONE); in __wait_one_discard_bio()
1757 __remove_discard_cmd(sbi, dc); in __wait_one_discard_bio()
1764 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi, in __wait_discard_cmd_range() argument
1768 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __wait_discard_cmd_range()
1788 __remove_discard_cmd(sbi, iter); in __wait_discard_cmd_range()
1798 trimmed += __wait_one_discard_bio(sbi, dc); in __wait_discard_cmd_range()
1805 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi, in __wait_all_discard_cmd() argument
1812 return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX); in __wait_all_discard_cmd()
1815 __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY); in __wait_all_discard_cmd()
1816 discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); in __wait_all_discard_cmd()
1817 __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY); in __wait_all_discard_cmd()
1818 discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); in __wait_all_discard_cmd()
1824 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr) in f2fs_wait_discard_bio() argument
1826 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_wait_discard_bio()
1831 dc = __lookup_discard_cmd(sbi, blkaddr); in f2fs_wait_discard_bio()
1833 if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) { in f2fs_wait_discard_bio()
1834 int devi = f2fs_bdev_index(sbi, dc->bdev); in f2fs_wait_discard_bio()
1841 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) { in f2fs_wait_discard_bio()
1844 __submit_zone_reset_cmd(sbi, dc, REQ_SYNC, in f2fs_wait_discard_bio()
1849 __wait_one_discard_bio(sbi, dc); in f2fs_wait_discard_bio()
1856 __punch_discard_cmd(sbi, dc, blkaddr); in f2fs_wait_discard_bio()
1865 __wait_one_discard_bio(sbi, dc); in f2fs_wait_discard_bio()
1868 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi) in f2fs_stop_discard_thread() argument
1870 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_stop_discard_thread()
1882 * @sbi: the f2fs_sb_info data for discard cmd to issue
1888 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi) in f2fs_issue_discard_timeout() argument
1890 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_issue_discard_timeout()
1897 __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT, in f2fs_issue_discard_timeout()
1899 __issue_discard_cmd(sbi, &dpolicy); in f2fs_issue_discard_timeout()
1900 dropped = __drop_discard_cmd(sbi); in f2fs_issue_discard_timeout()
1903 __wait_all_discard_cmd(sbi, NULL); in f2fs_issue_discard_timeout()
1905 f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt)); in f2fs_issue_discard_timeout()
1911 struct f2fs_sb_info *sbi = data; in issue_discard_thread() local
1912 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in issue_discard_thread()
1925 if (sbi->gc_mode == GC_URGENT_HIGH || in issue_discard_thread()
1926 !f2fs_available_free_memory(sbi, DISCARD_CACHE)) in issue_discard_thread()
1927 __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, in issue_discard_thread()
1930 __init_discard_policy(sbi, &dpolicy, DPOLICY_BG, in issue_discard_thread()
1938 __wait_all_discard_cmd(sbi, NULL); in issue_discard_thread()
1940 if (f2fs_readonly(sbi->sb)) in issue_discard_thread()
1944 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || in issue_discard_thread()
1950 sb_start_intwrite(sbi->sb); in issue_discard_thread()
1952 issued = __issue_discard_cmd(sbi, &dpolicy); in issue_discard_thread()
1954 __wait_all_discard_cmd(sbi, &dpolicy); in issue_discard_thread()
1957 wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME); in issue_discard_thread()
1966 sb_end_intwrite(sbi->sb); in issue_discard_thread()
1973 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, in __f2fs_issue_discard_zone() argument
1981 if (f2fs_is_multi_device(sbi)) { in __f2fs_issue_discard_zone()
1982 devi = f2fs_target_device_index(sbi, blkstart); in __f2fs_issue_discard_zone()
1985 f2fs_err(sbi, "Invalid block %x", blkstart); in __f2fs_issue_discard_zone()
1992 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) { in __f2fs_issue_discard_zone()
1998 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)", in __f2fs_issue_discard_zone()
1999 devi, sbi->s_ndevs ? FDEV(devi).path : "", in __f2fs_issue_discard_zone()
2004 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) { in __f2fs_issue_discard_zone()
2016 __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen); in __f2fs_issue_discard_zone()
2021 __queue_discard_cmd(sbi, bdev, lblkstart, blklen); in __f2fs_issue_discard_zone()
2026 static int __issue_discard_async(struct f2fs_sb_info *sbi, in __issue_discard_async() argument
2030 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) in __issue_discard_async()
2031 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen); in __issue_discard_async()
2033 __queue_discard_cmd(sbi, bdev, blkstart, blklen); in __issue_discard_async()
2037 static int f2fs_issue_discard(struct f2fs_sb_info *sbi, in f2fs_issue_discard() argument
2047 bdev = f2fs_target_device(sbi, blkstart, NULL); in f2fs_issue_discard()
2052 f2fs_target_device(sbi, i, NULL); in f2fs_issue_discard()
2055 err = __issue_discard_async(sbi, bdev, in f2fs_issue_discard()
2065 se = get_seg_entry(sbi, GET_SEGNO(sbi, i)); in f2fs_issue_discard()
2066 offset = GET_BLKOFF_FROM_SEG0(sbi, i); in f2fs_issue_discard()
2068 if (f2fs_block_unit_discard(sbi) && in f2fs_issue_discard()
2070 sbi->discard_blks--; in f2fs_issue_discard()
2074 err = __issue_discard_async(sbi, bdev, start, len); in f2fs_issue_discard()
2078 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc, in add_discard_addrs() argument
2082 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); in add_discard_addrs()
2086 unsigned long *dmap = SIT_I(sbi)->tmp_map; in add_discard_addrs()
2090 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list; in add_discard_addrs()
2093 if (se->valid_blocks == BLKS_PER_SEG(sbi) || in add_discard_addrs()
2094 !f2fs_hw_support_discard(sbi) || in add_discard_addrs()
2095 !f2fs_block_unit_discard(sbi)) in add_discard_addrs()
2099 if (!f2fs_realtime_discard_enable(sbi) || in add_discard_addrs()
2101 !IS_CURSEG(sbi, cpc->trim_start)) || in add_discard_addrs()
2102 SM_I(sbi)->dcc_info->nr_discards >= in add_discard_addrs()
2103 SM_I(sbi)->dcc_info->max_discards) in add_discard_addrs()
2112 while (force || SM_I(sbi)->dcc_info->nr_discards <= in add_discard_addrs()
2113 SM_I(sbi)->dcc_info->max_discards) { in add_discard_addrs()
2114 start = __find_rev_next_bit(dmap, BLKS_PER_SEG(sbi), end + 1); in add_discard_addrs()
2115 if (start >= BLKS_PER_SEG(sbi)) in add_discard_addrs()
2119 BLKS_PER_SEG(sbi), start + 1); in add_discard_addrs()
2120 if (force && start && end != BLKS_PER_SEG(sbi) && in add_discard_addrs()
2130 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start); in add_discard_addrs()
2137 SM_I(sbi)->dcc_info->nr_discards += end - start; in add_discard_addrs()
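add_discard_addrs() derives a per-segment discard bitmap from the current and checkpointed validity bitmaps (in the non-force case a block is discardable when it was valid at the last checkpoint but is freed now, i.e. (cur ^ ckpt) & ckpt) and then converts runs of set bits into discard entries. A userspace rendering of that run extraction over 64-bit words, assuming that bitmap formula:

    #include <stdio.h>

    /* Discardable = valid at last checkpoint but freed since; emit each run
     * of set bits as one [start, end) discard range. */
    static void emit_discard_runs(const unsigned long long *cur,
                                  const unsigned long long *ckpt, int nbits)
    {
            int start = -1;

            for (int b = 0; b <= nbits; b++) {
                    int bit = 0;

                    if (b < nbits) {
                            unsigned long long w =
                                    (cur[b / 64] ^ ckpt[b / 64]) & ckpt[b / 64];
                            bit = (int)((w >> (b % 64)) & 1);
                    }
                    if (bit && start < 0)
                            start = b;                      /* run opens */
                    else if (!bit && start >= 0) {
                            printf("discard [%d, %d)\n", start, b);
                            start = -1;                     /* run closes */
                    }
            }
    }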
2148 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi) in f2fs_release_discard_addrs() argument
2150 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); in f2fs_release_discard_addrs()
2161 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) in set_prefree_as_free_segments() argument
2163 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in set_prefree_as_free_segments()
2167 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) in set_prefree_as_free_segments()
2168 __set_test_and_free(sbi, segno, false); in set_prefree_as_free_segments()
2172 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, in f2fs_clear_prefree_segments() argument
2175 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_clear_prefree_segments()
2178 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in f2fs_clear_prefree_segments()
2183 bool section_alignment = F2FS_OPTION(sbi).discard_unit == in f2fs_clear_prefree_segments()
2186 if (f2fs_lfs_mode(sbi) && __is_large_section(sbi)) in f2fs_clear_prefree_segments()
2196 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); in f2fs_clear_prefree_segments()
2197 if (start >= MAIN_SEGS(sbi)) in f2fs_clear_prefree_segments()
2199 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), in f2fs_clear_prefree_segments()
2203 start = rounddown(start, SEGS_PER_SEC(sbi)); in f2fs_clear_prefree_segments()
2204 end = roundup(end, SEGS_PER_SEC(sbi)); in f2fs_clear_prefree_segments()
2212 if (!f2fs_realtime_discard_enable(sbi)) in f2fs_clear_prefree_segments()
2220 if (!f2fs_sb_has_blkzoned(sbi) && in f2fs_clear_prefree_segments()
2221 (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) { in f2fs_clear_prefree_segments()
2222 f2fs_issue_discard(sbi, START_BLOCK(sbi, start), in f2fs_clear_prefree_segments()
2223 SEGS_TO_BLKS(sbi, end - start)); in f2fs_clear_prefree_segments()
2227 secno = GET_SEC_FROM_SEG(sbi, start); in f2fs_clear_prefree_segments()
2228 start_segno = GET_SEG_FROM_SEC(sbi, secno); in f2fs_clear_prefree_segments()
2229 if (!IS_CURSEC(sbi, secno) && in f2fs_clear_prefree_segments()
2230 !get_valid_blocks(sbi, start, true)) in f2fs_clear_prefree_segments()
2231 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno), in f2fs_clear_prefree_segments()
2232 BLKS_PER_SEC(sbi)); in f2fs_clear_prefree_segments()
2234 start = start_segno + SEGS_PER_SEC(sbi); in f2fs_clear_prefree_segments()
2242 if (!f2fs_block_unit_discard(sbi)) in f2fs_clear_prefree_segments()
2253 BLKS_PER_SEG(sbi), cur_pos); in f2fs_clear_prefree_segments()
2256 if (f2fs_sb_has_blkzoned(sbi) || in f2fs_clear_prefree_segments()
2260 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, in f2fs_clear_prefree_segments()
2265 BLKS_PER_SEG(sbi), cur_pos); in f2fs_clear_prefree_segments()
2271 if (cur_pos < BLKS_PER_SEG(sbi)) in f2fs_clear_prefree_segments()
2279 wake_up_discard_thread(sbi, false); in f2fs_clear_prefree_segments()
2282 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi) in f2fs_start_discard_thread() argument
2284 dev_t dev = sbi->sb->s_bdev->bd_dev; in f2fs_start_discard_thread()
2285 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_start_discard_thread()
2288 if (f2fs_sb_has_readonly(sbi)) { in f2fs_start_discard_thread()
2289 f2fs_info(sbi, in f2fs_start_discard_thread()
2294 if (!f2fs_realtime_discard_enable(sbi)) in f2fs_start_discard_thread()
2297 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, in f2fs_start_discard_thread()
2307 static int create_discard_cmd_control(struct f2fs_sb_info *sbi) in create_discard_cmd_control() argument
2312 if (SM_I(sbi)->dcc_info) { in create_discard_cmd_control()
2313 dcc = SM_I(sbi)->dcc_info; in create_discard_cmd_control()
2317 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL); in create_discard_cmd_control()
2325 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT || in create_discard_cmd_control()
2326 F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) in create_discard_cmd_control()
2327 dcc->discard_granularity = BLKS_PER_SEG(sbi); in create_discard_cmd_control()
2339 dcc->max_discards = SEGS_TO_BLKS(sbi, MAIN_SEGS(sbi)); in create_discard_cmd_control()
2351 SM_I(sbi)->dcc_info = dcc; in create_discard_cmd_control()
2353 err = f2fs_start_discard_thread(sbi); in create_discard_cmd_control()
2356 SM_I(sbi)->dcc_info = NULL; in create_discard_cmd_control()
2362 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi) in destroy_discard_cmd_control() argument
2364 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in destroy_discard_cmd_control()
2369 f2fs_stop_discard_thread(sbi); in destroy_discard_cmd_control()
2375 f2fs_issue_discard_timeout(sbi); in destroy_discard_cmd_control()
2378 SM_I(sbi)->dcc_info = NULL; in destroy_discard_cmd_control()
2381 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) in __mark_sit_entry_dirty() argument
2383 struct sit_info *sit_i = SIT_I(sbi); in __mark_sit_entry_dirty()
2393 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, in __set_sit_entry_type() argument
2396 struct seg_entry *se = get_seg_entry(sbi, segno); in __set_sit_entry_type()
2400 __mark_sit_entry_dirty(sbi, segno); in __set_sit_entry_type()
2403 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi, in get_segment_mtime() argument
2406 unsigned int segno = GET_SEGNO(sbi, blkaddr); in get_segment_mtime()
2410 return get_seg_entry(sbi, segno)->mtime; in get_segment_mtime()
2413 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr, in update_segment_mtime() argument
2417 unsigned int segno = GET_SEGNO(sbi, blkaddr); in update_segment_mtime()
2418 unsigned long long ctime = get_mtime(sbi, false); in update_segment_mtime()
2424 se = get_seg_entry(sbi, segno); in update_segment_mtime()
2432 if (ctime > SIT_I(sbi)->max_mtime) in update_segment_mtime()
2433 SIT_I(sbi)->max_mtime = ctime; in update_segment_mtime()
2440 static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se, in update_sit_entry_for_release() argument
2450 f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1)); in update_sit_entry_for_release()
2458 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d", in update_sit_entry_for_release()
2460 f2fs_bug_on(sbi, 1); in update_sit_entry_for_release()
2464 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i); in update_sit_entry_for_release()
2465 f2fs_bug_on(sbi, 1); in update_sit_entry_for_release()
2468 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in update_sit_entry_for_release()
2476 spin_lock(&sbi->stat_lock); in update_sit_entry_for_release()
2477 sbi->unusable_block_count++; in update_sit_entry_for_release()
2478 spin_unlock(&sbi->stat_lock); in update_sit_entry_for_release()
2482 if (f2fs_block_unit_discard(sbi) && in update_sit_entry_for_release()
2484 sbi->discard_blks++; in update_sit_entry_for_release()
2493 static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se, in update_sit_entry_for_alloc() argument
2506 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d", in update_sit_entry_for_alloc()
2508 f2fs_bug_on(sbi, 1); in update_sit_entry_for_alloc()
2512 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr); in update_sit_entry_for_alloc()
2513 f2fs_bug_on(sbi, 1); in update_sit_entry_for_alloc()
2518 if (f2fs_block_unit_discard(sbi) && in update_sit_entry_for_alloc()
2520 sbi->discard_blks--; in update_sit_entry_for_alloc()
2526 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { in update_sit_entry_for_alloc()
2542 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) in update_sit_entry() argument
2548 segno = GET_SEGNO(sbi, blkaddr); in update_sit_entry()
2552 se = get_seg_entry(sbi, segno); in update_sit_entry()
2554 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); in update_sit_entry()
2556 f2fs_bug_on(sbi, (new_vblocks < 0 || in update_sit_entry()
2557 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno)))); in update_sit_entry()
2563 del = update_sit_entry_for_alloc(sbi, se, blkaddr, offset, del); in update_sit_entry()
2565 del = update_sit_entry_for_release(sbi, se, blkaddr, offset, del); in update_sit_entry()
2568 __mark_sit_entry_dirty(sbi, segno); in update_sit_entry()
2571 SIT_I(sbi)->written_valid_blocks += del; in update_sit_entry()
2573 if (__is_large_section(sbi)) in update_sit_entry()
2574 get_sec_entry(sbi, segno)->valid_blocks += del; in update_sit_entry()
2577 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr, in f2fs_invalidate_blocks() argument
2580 unsigned int segno = GET_SEGNO(sbi, addr); in f2fs_invalidate_blocks()
2581 struct sit_info *sit_i = SIT_I(sbi); in f2fs_invalidate_blocks()
2583 unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1; in f2fs_invalidate_blocks()
2584 unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt; in f2fs_invalidate_blocks()
2586 f2fs_bug_on(sbi, addr == NULL_ADDR); in f2fs_invalidate_blocks()
2590 f2fs_invalidate_internal_cache(sbi, addr, len); in f2fs_invalidate_blocks()
2598 cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr); in f2fs_invalidate_blocks()
2601 update_segment_mtime(sbi, addr_start, 0); in f2fs_invalidate_blocks()
2602 update_sit_entry(sbi, addr_start, -cnt); in f2fs_invalidate_blocks()
2605 locate_dirty_segment(sbi, segno); in f2fs_invalidate_blocks()
2608 addr_start = START_BLOCK(sbi, ++segno); in f2fs_invalidate_blocks()
2610 cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1; in f2fs_invalidate_blocks()
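The f2fs_invalidate_blocks() lines show a multi-block invalidate being chopped at segment boundaries: the first chunk runs to the end of the starting segment, middle segments are invalidated whole, and the last chunk covers the remainder, with each chunk feeding update_sit_entry() and locate_dirty_segment(). The chunking arithmetic in isolation:

    #include <stdio.h>

    /* Split [addr, addr + len) at segment boundaries so every SIT update
     * touches exactly one segment. */
    static void split_by_segment(unsigned int addr, unsigned int len,
                                 unsigned int blks_per_seg)
    {
            unsigned int end = addr + len - 1;

            while (addr <= end) {
                    unsigned int seg_end = (addr / blks_per_seg + 1) * blks_per_seg - 1;
                    unsigned int stop = seg_end < end ? seg_end : end;
                    unsigned int cnt = stop - addr + 1;

                    printf("segment %u: invalidate %u block(s) from %u\n",
                           addr / blks_per_seg, cnt, addr);
                    addr += cnt;
            }
    }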
2618 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) in f2fs_is_checkpointed_data() argument
2620 struct sit_info *sit_i = SIT_I(sbi); in f2fs_is_checkpointed_data()
2630 segno = GET_SEGNO(sbi, blkaddr); in f2fs_is_checkpointed_data()
2631 se = get_seg_entry(sbi, segno); in f2fs_is_checkpointed_data()
2632 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); in f2fs_is_checkpointed_data()
2642 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type) in f2fs_curseg_valid_blocks() argument
2644 struct curseg_info *curseg = CURSEG_I(sbi, type); in f2fs_curseg_valid_blocks()
2646 if (sbi->ckpt->alloc_type[type] == SSR) in f2fs_curseg_valid_blocks()
2647 return BLKS_PER_SEG(sbi); in f2fs_curseg_valid_blocks()
2654 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) in f2fs_npages_for_summary_flush() argument
2660 if (sbi->ckpt->alloc_type[i] != SSR && for_ra) in f2fs_npages_for_summary_flush()
2662 le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]); in f2fs_npages_for_summary_flush()
2664 valid_sum_count += f2fs_curseg_valid_blocks(sbi, i); in f2fs_npages_for_summary_flush()
2680 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) in f2fs_get_sum_page() argument
2682 if (unlikely(f2fs_cp_error(sbi))) in f2fs_get_sum_page()
2684 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno)); in f2fs_get_sum_page()
2687 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, in f2fs_update_meta_page() argument
2690 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); in f2fs_update_meta_page()
2697 static void write_sum_page(struct f2fs_sb_info *sbi, in write_sum_page() argument
2700 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr); in write_sum_page()
2703 static void write_current_sum_page(struct f2fs_sb_info *sbi, in write_current_sum_page() argument
2706 struct curseg_info *curseg = CURSEG_I(sbi, type); in write_current_sum_page()
2707 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); in write_current_sum_page()
2729 static int is_next_segment_free(struct f2fs_sb_info *sbi, in is_next_segment_free() argument
2733 struct free_segmap_info *free_i = FREE_I(sbi); in is_next_segment_free()
2735 if (segno < MAIN_SEGS(sbi) && segno % SEGS_PER_SEC(sbi)) in is_next_segment_free()
2744 static int get_new_segment(struct f2fs_sb_info *sbi, in get_new_segment() argument
2747 struct free_segmap_info *free_i = FREE_I(sbi); in get_new_segment()
2749 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone; in get_new_segment()
2750 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg); in get_new_segment()
2751 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg); in get_new_segment()
2758 if (time_to_inject(sbi, FAULT_NO_SEGMENT)) { in get_new_segment()
2763 if (!new_sec && ((*newseg + 1) % SEGS_PER_SEC(sbi))) { in get_new_segment()
2765 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1); in get_new_segment()
2766 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1)) in get_new_segment()
2775 if (f2fs_sb_has_blkzoned(sbi)) { in get_new_segment()
2777 if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning) in get_new_segment()
2780 segno = max(sbi->first_zoned_segno, *newseg); in get_new_segment()
2781 hint = GET_SEC_FROM_SEG(sbi, segno); in get_new_segment()
2786 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); in get_new_segment()
2789 if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) { in get_new_segment()
2791 if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) { in get_new_segment()
2792 hint = GET_SEC_FROM_SEG(sbi, sbi->first_zoned_segno); in get_new_segment()
2793 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); in get_new_segment()
2796 MAIN_SECS(sbi)); in get_new_segment()
2797 if (secno >= MAIN_SECS(sbi)) { in get_new_segment()
2799 f2fs_bug_on(sbi, 1); in get_new_segment()
2805 if (secno >= MAIN_SECS(sbi)) { in get_new_segment()
2807 MAIN_SECS(sbi)); in get_new_segment()
2808 if (secno >= MAIN_SECS(sbi)) { in get_new_segment()
2810 f2fs_bug_on(sbi, !pinning); in get_new_segment()
2814 segno = GET_SEG_FROM_SEC(sbi, secno); in get_new_segment()
2815 zoneno = GET_ZONE_FROM_SEC(sbi, secno); in get_new_segment()
2820 if (sbi->secs_per_zone == 1) in get_new_segment()
2825 if (CURSEG_I(sbi, i)->zone == zoneno) in get_new_segment()
2833 hint = (zoneno + 1) * sbi->secs_per_zone; in get_new_segment()
2839 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); in get_new_segment()
2843 !f2fs_valid_pinned_area(sbi, START_BLOCK(sbi, segno))) { in get_new_segment()
2847 __set_inuse(sbi, segno); in get_new_segment()
2853 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_NO_SEGMENT); in get_new_segment()
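get_new_segment() is essentially a hinted bitmap search: try to stay in the current section, otherwise run find_next_zero_bit over the free-section map from the hint, wrapping to zero once before treating exhaustion as no-space (with extra passes for zoned allocation policies). The hint-then-wrap pattern in miniature:

    #include <stdbool.h>

    /* Hinted search with one wrap to zero; -1 plays the role of -ENOSPC. */
    static int find_free_section(const bool *used, int nsecs, int hint)
    {
            for (int pass = 0; pass < 2; pass++) {
                    for (int s = pass ? 0 : hint; s < nsecs; s++)
                            if (!used[s])
                                    return s;
                    /* [hint, nsecs) exhausted: retry once over [0, nsecs) */
            }
            return -1;
    }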
2857 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) in reset_curseg() argument
2859 struct curseg_info *curseg = CURSEG_I(sbi, type); in reset_curseg()
2869 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno); in reset_curseg()
2876 sanity_check_seg_type(sbi, seg_type); in reset_curseg()
2882 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified); in reset_curseg()
2885 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type) in __get_next_segno() argument
2887 struct curseg_info *curseg = CURSEG_I(sbi, type); in __get_next_segno()
2890 sanity_check_seg_type(sbi, seg_type); in __get_next_segno()
2891 if (__is_large_section(sbi)) { in __get_next_segno()
2892 if (f2fs_need_rand_seg(sbi)) { in __get_next_segno()
2893 unsigned int hint = GET_SEC_FROM_SEG(sbi, curseg->segno); in __get_next_segno()
2895 if (GET_SEC_FROM_SEG(sbi, curseg->segno + 1) != hint) in __get_next_segno()
2898 GET_SEG_FROM_SEC(sbi, hint + 1) - 1); in __get_next_segno()
2901 } else if (f2fs_need_rand_seg(sbi)) { in __get_next_segno()
2902 return get_random_u32_below(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi)); in __get_next_segno()
2909 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in __get_next_segno()
2915 if (SIT_I(sbi)->last_victim[ALLOC_NEXT]) in __get_next_segno()
2916 return SIT_I(sbi)->last_victim[ALLOC_NEXT]; in __get_next_segno()
2919 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) in __get_next_segno()
2936 static int new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) in new_curseg() argument
2938 struct curseg_info *curseg = CURSEG_I(sbi, type); in new_curseg()
2944 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno)); in new_curseg()
2946 segno = __get_next_segno(sbi, type); in new_curseg()
2947 ret = get_new_segment(sbi, &segno, new_sec, pinning); in new_curseg()
2955 reset_curseg(sbi, type, 1); in new_curseg()
2957 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) in new_curseg()
2959 get_random_u32_inclusive(1, sbi->max_fragment_chunk); in new_curseg()
2963 static int __next_free_blkoff(struct f2fs_sb_info *sbi, in __next_free_blkoff() argument
2966 struct seg_entry *se = get_seg_entry(sbi, segno); in __next_free_blkoff()
2968 unsigned long *target_map = SIT_I(sbi)->tmp_map; in __next_free_blkoff()
2976 return __find_rev_next_zero_bit(target_map, BLKS_PER_SEG(sbi), start); in __next_free_blkoff()
2979 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi, in f2fs_find_next_ssr_block() argument
2982 return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1); in f2fs_find_next_ssr_block()
2985 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno) in f2fs_segment_has_free_slot() argument
2987 return __next_free_blkoff(sbi, segno, 0) < BLKS_PER_SEG(sbi); in f2fs_segment_has_free_slot()
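For SSR, __next_free_blkoff() ORs the checkpointed and current validity bitmaps and looks for the first clear bit, since a slot is reusable only if it is free in both views; f2fs_segment_has_free_slot() just asks whether such an offset exists. The same test over byte-array bitmaps:

    #include <stdbool.h>

    /* A slot is reusable for SSR only if it is clear in BOTH bitmaps. */
    static int next_free_blkoff(const unsigned char *ckpt_map,
                                const unsigned char *cur_map,
                                int blks_per_seg, int start)
    {
            for (int off = start; off < blks_per_seg; off++)
                    if (!(((ckpt_map[off / 8] | cur_map[off / 8]) >> (off % 8)) & 1))
                            return off;
            return blks_per_seg;    /* no slot: full for SSR purposes */
    }

    static bool segment_has_free_slot(const unsigned char *ckpt_map,
                                      const unsigned char *cur_map,
                                      int blks_per_seg)
    {
            return next_free_blkoff(ckpt_map, cur_map, blks_per_seg, 0) <
                   blks_per_seg;
    }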
2994 static int change_curseg(struct f2fs_sb_info *sbi, int type) in change_curseg() argument
2996 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in change_curseg()
2997 struct curseg_info *curseg = CURSEG_I(sbi, type); in change_curseg()
3003 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno)); in change_curseg()
3005 __set_test_and_inuse(sbi, new_segno); in change_curseg()
3008 __remove_dirty_segment(sbi, new_segno, PRE); in change_curseg()
3009 __remove_dirty_segment(sbi, new_segno, DIRTY); in change_curseg()
3012 reset_curseg(sbi, type, 1); in change_curseg()
3014 curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0); in change_curseg()
3016 sum_page = f2fs_get_sum_page(sbi, new_segno); in change_curseg()
3028 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
3031 static int get_atssr_segment(struct f2fs_sb_info *sbi, int type, in get_atssr_segment() argument
3035 struct curseg_info *curseg = CURSEG_I(sbi, type); in get_atssr_segment()
3040 if (get_ssr_segment(sbi, type, alloc_mode, age)) { in get_atssr_segment()
3041 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno); in get_atssr_segment()
3044 ret = change_curseg(sbi, type); in get_atssr_segment()
3048 ret = new_curseg(sbi, type, true); in get_atssr_segment()
3050 stat_inc_seg_type(sbi, curseg); in get_atssr_segment()
3054 static int __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi, bool force) in __f2fs_init_atgc_curseg() argument
3056 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC); in __f2fs_init_atgc_curseg()
3059 if (!sbi->am.atgc_enabled && !force) in __f2fs_init_atgc_curseg()
3062 f2fs_down_read(&SM_I(sbi)->curseg_lock); in __f2fs_init_atgc_curseg()
3065 down_write(&SIT_I(sbi)->sentry_lock); in __f2fs_init_atgc_curseg()
3067 ret = get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, in __f2fs_init_atgc_curseg()
3070 up_write(&SIT_I(sbi)->sentry_lock); in __f2fs_init_atgc_curseg()
3073 f2fs_up_read(&SM_I(sbi)->curseg_lock); in __f2fs_init_atgc_curseg()
3077 int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi) in f2fs_init_inmem_curseg() argument
3079 return __f2fs_init_atgc_curseg(sbi, false); in f2fs_init_inmem_curseg()
3082 int f2fs_reinit_atgc_curseg(struct f2fs_sb_info *sbi) in f2fs_reinit_atgc_curseg() argument
3086 if (!test_opt(sbi, ATGC)) in f2fs_reinit_atgc_curseg()
3088 if (sbi->am.atgc_enabled) in f2fs_reinit_atgc_curseg()
3090 if (le64_to_cpu(F2FS_CKPT(sbi)->elapsed_time) < in f2fs_reinit_atgc_curseg()
3091 sbi->am.age_threshold) in f2fs_reinit_atgc_curseg()
3094 ret = __f2fs_init_atgc_curseg(sbi, true); in f2fs_reinit_atgc_curseg()
3096 sbi->am.atgc_enabled = true; in f2fs_reinit_atgc_curseg()
3097 f2fs_info(sbi, "reenabled age threshold GC"); in f2fs_reinit_atgc_curseg()
3102 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type) in __f2fs_save_inmem_curseg() argument
3104 struct curseg_info *curseg = CURSEG_I(sbi, type); in __f2fs_save_inmem_curseg()
3110 if (get_valid_blocks(sbi, curseg->segno, false)) { in __f2fs_save_inmem_curseg()
3111 write_sum_page(sbi, curseg->sum_blk, in __f2fs_save_inmem_curseg()
3112 GET_SUM_BLOCK(sbi, curseg->segno)); in __f2fs_save_inmem_curseg()
3114 mutex_lock(&DIRTY_I(sbi)->seglist_lock); in __f2fs_save_inmem_curseg()
3115 __set_test_and_free(sbi, curseg->segno, true); in __f2fs_save_inmem_curseg()
3116 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); in __f2fs_save_inmem_curseg()
3122 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi) in f2fs_save_inmem_curseg() argument
3124 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); in f2fs_save_inmem_curseg()
3126 if (sbi->am.atgc_enabled) in f2fs_save_inmem_curseg()
3127 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); in f2fs_save_inmem_curseg()
3130 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type) in __f2fs_restore_inmem_curseg() argument
3132 struct curseg_info *curseg = CURSEG_I(sbi, type); in __f2fs_restore_inmem_curseg()
3137 if (get_valid_blocks(sbi, curseg->segno, false)) in __f2fs_restore_inmem_curseg()
3140 mutex_lock(&DIRTY_I(sbi)->seglist_lock); in __f2fs_restore_inmem_curseg()
3141 __set_test_and_inuse(sbi, curseg->segno); in __f2fs_restore_inmem_curseg()
3142 mutex_unlock(&DIRTY_I(sbi)->seglist_lock); in __f2fs_restore_inmem_curseg()
3147 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi) in f2fs_restore_inmem_curseg() argument
3149 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED); in f2fs_restore_inmem_curseg()
3151 if (sbi->am.atgc_enabled) in f2fs_restore_inmem_curseg()
3152 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC); in f2fs_restore_inmem_curseg()
3155 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type, in get_ssr_segment() argument
3158 struct curseg_info *curseg = CURSEG_I(sbi, type); in get_ssr_segment()
3164 sanity_check_seg_type(sbi, seg_type); in get_ssr_segment()
3167 if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, in get_ssr_segment()
3195 if (!f2fs_get_victim(sbi, &segno, BG_GC, i, in get_ssr_segment()
3203 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in get_ssr_segment()
3204 segno = get_free_segment(sbi); in get_ssr_segment()
3213 static bool need_new_seg(struct f2fs_sb_info *sbi, int type) in need_new_seg() argument
3215 struct curseg_info *curseg = CURSEG_I(sbi, type); in need_new_seg()
3217 if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && in need_new_seg()
3220 if (curseg->alloc_type == LFS && is_next_segment_free(sbi, curseg) && in need_new_seg()
3221 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in need_new_seg()
3223 if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0)) in need_new_seg()
3228 int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, in f2fs_allocate_segment_for_resize() argument
3231 struct curseg_info *curseg = CURSEG_I(sbi, type); in f2fs_allocate_segment_for_resize()
3235 f2fs_down_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_segment_for_resize()
3237 down_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_segment_for_resize()
3239 segno = CURSEG_I(sbi, type)->segno; in f2fs_allocate_segment_for_resize()
3243 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0)) in f2fs_allocate_segment_for_resize()
3244 ret = change_curseg(sbi, type); in f2fs_allocate_segment_for_resize()
3246 ret = new_curseg(sbi, type, true); in f2fs_allocate_segment_for_resize()
3248 stat_inc_seg_type(sbi, curseg); in f2fs_allocate_segment_for_resize()
3250 locate_dirty_segment(sbi, segno); in f2fs_allocate_segment_for_resize()
3252 up_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_segment_for_resize()
3255 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u", in f2fs_allocate_segment_for_resize()
3259 f2fs_up_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_segment_for_resize()
3263 static int __allocate_new_segment(struct f2fs_sb_info *sbi, int type, in __allocate_new_segment() argument
3266 struct curseg_info *curseg = CURSEG_I(sbi, type); in __allocate_new_segment()
3275 !get_valid_blocks(sbi, curseg->segno, new_sec) && in __allocate_new_segment()
3276 !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec)) in __allocate_new_segment()
3281 err = new_curseg(sbi, type, true); in __allocate_new_segment()
3284 stat_inc_seg_type(sbi, curseg); in __allocate_new_segment()
3285 locate_dirty_segment(sbi, old_segno); in __allocate_new_segment()
3289 int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) in f2fs_allocate_new_section() argument
3293 f2fs_down_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_new_section()
3294 down_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_new_section()
3295 ret = __allocate_new_segment(sbi, type, true, force); in f2fs_allocate_new_section()
3296 up_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_new_section()
3297 f2fs_up_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_new_section()
3302 int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi) in f2fs_allocate_pinning_section() argument
3308 f2fs_lock_op(sbi); in f2fs_allocate_pinning_section()
3309 err = f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); in f2fs_allocate_pinning_section()
3310 f2fs_unlock_op(sbi); in f2fs_allocate_pinning_section()
3312 if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) { in f2fs_allocate_pinning_section()
3313 f2fs_down_write(&sbi->gc_lock); in f2fs_allocate_pinning_section()
3314 err = f2fs_gc_range(sbi, 0, GET_SEGNO(sbi, FDEV(0).end_blk), in f2fs_allocate_pinning_section()
3316 f2fs_up_write(&sbi->gc_lock); in f2fs_allocate_pinning_section()
3326 int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) in f2fs_allocate_new_segments() argument
3331 f2fs_down_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_new_segments()
3332 down_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_new_segments()
3334 err += __allocate_new_segment(sbi, i, false, false); in f2fs_allocate_new_segments()
3335 up_write(&SIT_I(sbi)->sentry_lock); in f2fs_allocate_new_segments()
3336 f2fs_up_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_new_segments()
3341 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, in f2fs_exist_trim_candidates() argument
3347 down_write(&SIT_I(sbi)->sentry_lock); in f2fs_exist_trim_candidates()
3349 if (add_discard_addrs(sbi, cpc, true)) { in f2fs_exist_trim_candidates()
3354 up_write(&SIT_I(sbi)->sentry_lock); in f2fs_exist_trim_candidates()
3360 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi, in __issue_discard_cmd_range() argument
3364 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in __issue_discard_cmd_range()
3377 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); in __issue_discard_cmd_range()
3398 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued); in __issue_discard_cmd_range()
3404 __remove_discard_cmd(sbi, dc); in __issue_discard_cmd_range()
3408 trimmed += __wait_all_discard_cmd(sbi, NULL); in __issue_discard_cmd_range()
3415 __remove_discard_cmd(sbi, dc); in __issue_discard_cmd_range()
3428 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) in f2fs_trim_fs() argument
3438 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi); in f2fs_trim_fs()
3440 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) in f2fs_trim_fs()
3443 if (end < MAIN_BLKADDR(sbi)) in f2fs_trim_fs()
3446 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { in f2fs_trim_fs()
3447 f2fs_warn(sbi, "Found FS corruption, run fsck to fix."); in f2fs_trim_fs()
3452 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); in f2fs_trim_fs()
3453 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 : in f2fs_trim_fs()
3454 GET_SEGNO(sbi, end); in f2fs_trim_fs()
3456 start_segno = rounddown(start_segno, SEGS_PER_SEC(sbi)); in f2fs_trim_fs()
3457 end_segno = roundup(end_segno + 1, SEGS_PER_SEC(sbi)) - 1; in f2fs_trim_fs()
3465 if (sbi->discard_blks == 0) in f2fs_trim_fs()
3468 f2fs_down_write(&sbi->gc_lock); in f2fs_trim_fs()
3469 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_trim_fs()
3470 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_trim_fs()
3471 f2fs_up_write(&sbi->gc_lock); in f2fs_trim_fs()
3481 if (f2fs_realtime_discard_enable(sbi)) in f2fs_trim_fs()
3484 start_block = START_BLOCK(sbi, start_segno); in f2fs_trim_fs()
3485 end_block = START_BLOCK(sbi, end_segno + 1); in f2fs_trim_fs()
3487 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); in f2fs_trim_fs()
3488 trimmed = __issue_discard_cmd_range(sbi, &dpolicy, in f2fs_trim_fs()
3491 trimmed += __wait_discard_cmd_range(sbi, &dpolicy, in f2fs_trim_fs()
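f2fs_trim_fs() first clamps the user-supplied range to main-area segments, then, when LFS mode is combined with large sections (need_align), widens it to whole sections before checkpointing and issuing the discards. The clamping arithmetic on its own, mirroring the rounddown/roundup pair above:

    /* Convert a block range to segments; widen to sections when need_align. */
    static void clamp_trim_range(unsigned int start_blk, unsigned int end_blk,
                                 unsigned int blks_per_seg,
                                 unsigned int segs_per_sec, int need_align,
                                 unsigned int *start_segno, unsigned int *end_segno)
    {
            *start_segno = start_blk / blks_per_seg;
            *end_segno = end_blk / blks_per_seg;

            if (need_align) {       /* LFS + large section: whole sections only */
                    *start_segno -= *start_segno % segs_per_sec;
                    *end_segno = (*end_segno / segs_per_sec + 1) * segs_per_sec - 1;
            }
    }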
3499 int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint) in f2fs_rw_hint_to_seg_type() argument
3501 if (F2FS_OPTION(sbi).active_logs == 2) in f2fs_rw_hint_to_seg_type()
3503 else if (F2FS_OPTION(sbi).active_logs == 4) in f2fs_rw_hint_to_seg_type()
3543 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi, in f2fs_io_type_to_rw_hint() argument
3603 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __get_age_segment_type() local
3609 if (ei.age <= sbi->hot_data_age_threshold) in __get_age_segment_type()
3611 if (ei.age <= sbi->warm_data_age_threshold) in __get_age_segment_type()
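
__get_age_segment_type() compares the extent age against two per-superblock thresholds; the visible comparisons reduce to this small classifier (the return values are illustrative stand-ins for the CURSEG_* types):

static int age_to_data_temp(unsigned long long age,
			    unsigned long long hot_data_age_threshold,
			    unsigned long long warm_data_age_threshold)
{
	if (age <= hot_data_age_threshold)
		return 0;		/* hot data */
	if (age <= warm_data_age_threshold)
		return 1;		/* warm data */
	return 2;			/* cold data */
}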
3628 if (fio->sbi->am.atgc_enabled && in __get_segment_type_6()
3630 (fio->sbi->gc_mode != GC_URGENT_HIGH) && in __get_segment_type_6()
3659 enum temp_type f2fs_get_segment_temp(struct f2fs_sb_info *sbi, in f2fs_get_segment_temp() argument
3662 struct curseg_info *curseg = CURSEG_I(sbi, type); in f2fs_get_segment_temp()
3679 f2fs_bug_on(sbi, 1); in f2fs_get_segment_temp()
3689 switch (F2FS_OPTION(fio->sbi).active_logs) { in __get_segment_type()
3700 f2fs_bug_on(fio->sbi, true); in __get_segment_type()
3703 fio->temp = f2fs_get_segment_temp(fio->sbi, type); in __get_segment_type()
3708 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi, in f2fs_randomize_chunk() argument
3716 get_random_u32_inclusive(1, sbi->max_fragment_chunk); in f2fs_randomize_chunk()
3718 get_random_u32_inclusive(1, sbi->max_fragment_hole); in f2fs_randomize_chunk()
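
f2fs_randomize_chunk() implements the FS_MODE_FRAGMENT_BLK test knobs: after each chunk of contiguous blocks the allocator leaves a random hole so the image fragments deliberately. A userspace model, with rand() standing in for get_random_u32_inclusive():

#include <stdlib.h>

/* Userspace stand-in for get_random_u32_inclusive(1, max); max must be >= 1. */
static unsigned rand_inclusive(unsigned max)
{
	return 1 + (unsigned)rand() % max;
}

struct frag_state {
	unsigned chunk_left;	/* contiguous blocks still to allocate */
	unsigned hole_len;	/* blocks to skip once the chunk is used up */
};

static void randomize_chunk(struct frag_state *st,
			    unsigned max_fragment_chunk,
			    unsigned max_fragment_hole)
{
	st->chunk_left = rand_inclusive(max_fragment_chunk);
	st->hole_len = rand_inclusive(max_fragment_hole);
}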
3721 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, in f2fs_allocate_data_block() argument
3726 struct sit_info *sit_i = SIT_I(sbi); in f2fs_allocate_data_block()
3727 struct curseg_info *curseg = CURSEG_I(sbi, type); in f2fs_allocate_data_block()
3734 f2fs_down_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_data_block()
3745 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO); in f2fs_allocate_data_block()
3746 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr)); in f2fs_allocate_data_block()
3747 sanity_check_seg_type(sbi, se->type); in f2fs_allocate_data_block()
3748 f2fs_bug_on(sbi, IS_NODESEG(se->type)); in f2fs_allocate_data_block()
3750 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); in f2fs_allocate_data_block()
3752 f2fs_bug_on(sbi, curseg->next_blkoff >= BLKS_PER_SEG(sbi)); in f2fs_allocate_data_block()
3754 f2fs_wait_discard_bio(sbi, *new_blkaddr); in f2fs_allocate_data_block()
3758 curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg); in f2fs_allocate_data_block()
3761 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) in f2fs_allocate_data_block()
3762 f2fs_randomize_chunk(sbi, curseg); in f2fs_allocate_data_block()
3764 if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno)) in f2fs_allocate_data_block()
3766 stat_inc_block_count(sbi, curseg); in f2fs_allocate_data_block()
3769 old_mtime = get_segment_mtime(sbi, old_blkaddr); in f2fs_allocate_data_block()
3771 update_segment_mtime(sbi, old_blkaddr, 0); in f2fs_allocate_data_block()
3774 update_segment_mtime(sbi, *new_blkaddr, old_mtime); in f2fs_allocate_data_block()
3780 update_sit_entry(sbi, *new_blkaddr, 1); in f2fs_allocate_data_block()
3781 update_sit_entry(sbi, old_blkaddr, -1); in f2fs_allocate_data_block()
3789 !((curseg->segno + 1) % sbi->segs_per_sec)) { in f2fs_allocate_data_block()
3790 write_sum_page(sbi, curseg->sum_blk, in f2fs_allocate_data_block()
3791 GET_SUM_BLOCK(sbi, curseg->segno)); in f2fs_allocate_data_block()
3797 ret = get_atssr_segment(sbi, type, se->type, in f2fs_allocate_data_block()
3800 if (need_new_seg(sbi, type)) in f2fs_allocate_data_block()
3801 ret = new_curseg(sbi, type, false); in f2fs_allocate_data_block()
3803 ret = change_curseg(sbi, type); in f2fs_allocate_data_block()
3804 stat_inc_seg_type(sbi, curseg); in f2fs_allocate_data_block()
3817 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); in f2fs_allocate_data_block()
3818 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr)); in f2fs_allocate_data_block()
3821 atomic64_inc(&sbi->allocated_data_blocks); in f2fs_allocate_data_block()
3826 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); in f2fs_allocate_data_block()
3828 f2fs_inode_chksum_set(sbi, page); in f2fs_allocate_data_block()
3836 io = sbi->write_io[fio->type] + fio->temp; in f2fs_allocate_data_block()
3843 f2fs_up_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_data_block()
3850 f2fs_up_read(&SM_I(sbi)->curseg_lock); in f2fs_allocate_data_block()
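
The f2fs_allocate_data_block() matches trace the allocator's hot path: take the curseg's next free block, advance next_blkoff (sequentially in LFS, via f2fs_find_next_ssr_block() in SSR), update the SIT entries, and open a new segment once the current one fills. A condensed model with locking, mtime, ATGC and summary handling stripped out:

struct curseg_model {
	unsigned segno;
	unsigned next_blkoff;
};

static unsigned allocate_data_block(struct curseg_model *cs,
				    unsigned blks_per_seg,
				    void (*open_new_segment)(struct curseg_model *))
{
	/* NEXT_FREE_BLKADDR; main-area base address omitted here */
	unsigned new_blkaddr = cs->segno * blks_per_seg + cs->next_blkoff;

	cs->next_blkoff++;	/* LFS case: strictly sequential in-segment */
	/* update_sit_entry(new, +1) and update_sit_entry(old, -1) go here */
	if (cs->next_blkoff >= blks_per_seg)
		open_new_segment(cs);	/* new_curseg()/change_curseg() above */
	return new_blkaddr;
}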
3854 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, in f2fs_update_device_state() argument
3857 if (!f2fs_is_multi_device(sbi)) in f2fs_update_device_state()
3861 unsigned int devidx = f2fs_target_device_index(sbi, blkaddr); in f2fs_update_device_state()
3865 f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO); in f2fs_update_device_state()
3868 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) { in f2fs_update_device_state()
3869 spin_lock(&sbi->dev_lock); in f2fs_update_device_state()
3870 f2fs_set_bit(devidx, (char *)&sbi->dirty_device); in f2fs_update_device_state()
3871 spin_unlock(&sbi->dev_lock); in f2fs_update_device_state()
3908 bool keep_order = (f2fs_lfs_mode(fio->sbi) && in do_write_page()
3912 f2fs_down_read(&fio->sbi->io_order_lock); in do_write_page()
3914 if (f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, in do_write_page()
3919 if (f2fs_in_warm_node_list(fio->sbi, fio->page)) in do_write_page()
3920 f2fs_del_fsync_node_entry(fio->sbi, fio->page); in do_write_page()
3923 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) in do_write_page()
3924 f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1); in do_write_page()
3929 f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1); in do_write_page()
3932 f2fs_up_read(&fio->sbi->io_order_lock); in do_write_page()
3935 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct folio *folio, in f2fs_do_write_meta_page() argument
3939 .sbi = sbi, in f2fs_do_write_meta_page()
3951 if (unlikely(folio->index >= MAIN_BLKADDR(sbi))) in f2fs_do_write_meta_page()
3957 stat_inc_meta_count(sbi, folio->index); in f2fs_do_write_meta_page()
3958 f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE); in f2fs_do_write_meta_page()
3968 f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE); in f2fs_do_write_node_page()
3974 struct f2fs_sb_info *sbi = fio->sbi; in f2fs_outplace_write_data() local
3977 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); in f2fs_outplace_write_data()
3984 f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE); in f2fs_outplace_write_data()
3990 struct f2fs_sb_info *sbi = fio->sbi; in f2fs_inplace_write_data() local
3997 segno = GET_SEGNO(sbi, fio->new_blkaddr); in f2fs_inplace_write_data()
3999 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { in f2fs_inplace_write_data()
4000 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_inplace_write_data()
4001 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.", in f2fs_inplace_write_data()
4004 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); in f2fs_inplace_write_data()
4008 if (f2fs_cp_error(sbi)) { in f2fs_inplace_write_data()
4014 f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1); in f2fs_inplace_write_data()
4016 stat_inc_inplace_blocks(fio->sbi); in f2fs_inplace_write_data()
4018 if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi)) in f2fs_inplace_write_data()
4023 f2fs_update_device_state(fio->sbi, fio->ino, in f2fs_inplace_write_data()
4025 f2fs_update_iostat(fio->sbi, fio->page->mapping->host, in f2fs_inplace_write_data()
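
f2fs_outplace_write_data() and f2fs_inplace_write_data() above are the OPU/IPU split: out-of-place writes allocate a fresh block address first, while in-place writes reuse the existing one but must target a data segment or the fs gets flagged for fsck. A stub sketch of the split:

static int submit_write(unsigned blkaddr)
{
	(void)blkaddr;		/* stands in for the actual bio submission */
	return 0;
}

static int write_data_block(int inplace, unsigned cur_blkaddr,
			    unsigned (*allocate_new_block)(void))
{
	if (inplace)
		return submit_write(cur_blkaddr);	/* IPU: rewrite in place */
	return submit_write(allocate_new_block());	/* OPU: old block invalidated */
}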
4041 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi, in __f2fs_get_curseg() argument
4047 if (CURSEG_I(sbi, i)->segno == segno) in __f2fs_get_curseg()
4053 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in f2fs_do_replace_block() argument
4058 struct sit_info *sit_i = SIT_I(sbi); in f2fs_do_replace_block()
4066 segno = GET_SEGNO(sbi, new_blkaddr); in f2fs_do_replace_block()
4067 se = get_seg_entry(sbi, segno); in f2fs_do_replace_block()
4070 f2fs_down_write(&SM_I(sbi)->curseg_lock); in f2fs_do_replace_block()
4074 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { in f2fs_do_replace_block()
4081 if (IS_CURSEG(sbi, segno)) { in f2fs_do_replace_block()
4083 type = __f2fs_get_curseg(sbi, segno); in f2fs_do_replace_block()
4084 f2fs_bug_on(sbi, type == NO_CHECK_TYPE); in f2fs_do_replace_block()
4090 curseg = CURSEG_I(sbi, type); in f2fs_do_replace_block()
4091 f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type)); in f2fs_do_replace_block()
4103 if (change_curseg(sbi, type)) in f2fs_do_replace_block()
4107 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); in f2fs_do_replace_block()
4112 update_segment_mtime(sbi, new_blkaddr, 0); in f2fs_do_replace_block()
4113 update_sit_entry(sbi, new_blkaddr, 1); in f2fs_do_replace_block()
4115 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) { in f2fs_do_replace_block()
4116 f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1); in f2fs_do_replace_block()
4118 update_segment_mtime(sbi, old_blkaddr, 0); in f2fs_do_replace_block()
4119 update_sit_entry(sbi, old_blkaddr, -1); in f2fs_do_replace_block()
4122 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); in f2fs_do_replace_block()
4123 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr)); in f2fs_do_replace_block()
4125 locate_dirty_segment(sbi, old_cursegno); in f2fs_do_replace_block()
4130 if (change_curseg(sbi, type)) in f2fs_do_replace_block()
4140 f2fs_up_write(&SM_I(sbi)->curseg_lock); in f2fs_do_replace_block()
4143 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, in f2fs_replace_block() argument
4152 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr, in f2fs_replace_block()
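
f2fs_replace_block() wraps f2fs_do_replace_block(), which, per the matches, steers a curseg onto the segment owning new_blkaddr, points next_blkoff at the exact offset, and flips the SIT counters for both addresses. A sketch with simplified address math (the kernel's GET_BLKOFF_FROM_SEG0 offsets from the seg0 base, not the main area):

struct curseg_pos {
	unsigned segno;
	unsigned next_blkoff;
};

static void do_replace_block(struct curseg_pos *cs, unsigned new_blkaddr,
			     unsigned main_blkaddr, unsigned blks_per_seg)
{
	cs->segno = (new_blkaddr - main_blkaddr) / blks_per_seg;	/* GET_SEGNO */
	cs->next_blkoff = (new_blkaddr - main_blkaddr) % blks_per_seg;
	/* update_sit_entry(new_blkaddr, 1); update_sit_entry(old_blkaddr, -1); */
}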
4162 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in f2fs_wait_on_page_writeback() local
4165 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type); in f2fs_wait_on_page_writeback()
4167 f2fs_submit_merged_ipu_write(sbi, NULL, page); in f2fs_wait_on_page_writeback()
4170 f2fs_bug_on(sbi, locked && in f2fs_wait_on_page_writeback()
4180 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_wait_on_block_writeback() local
4189 cpage = find_lock_page(META_MAPPING(sbi), blkaddr); in f2fs_wait_on_block_writeback()
4199 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_wait_on_block_writeback_range() local
4208 f2fs_truncate_meta_inode_pages(sbi, blkaddr, len); in f2fs_wait_on_block_writeback_range()
4211 static int read_compacted_summaries(struct f2fs_sb_info *sbi) in read_compacted_summaries() argument
4213 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in read_compacted_summaries()
4220 start = start_sum_block(sbi); in read_compacted_summaries()
4222 page = f2fs_get_meta_page(sbi, start++); in read_compacted_summaries()
4228 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); in read_compacted_summaries()
4232 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); in read_compacted_summaries()
4241 seg_i = CURSEG_I(sbi, i); in read_compacted_summaries()
4245 reset_curseg(sbi, i, 0); in read_compacted_summaries()
4250 blk_off = BLKS_PER_SEG(sbi); in read_compacted_summaries()
4265 page = f2fs_get_meta_page(sbi, start++); in read_compacted_summaries()
4276 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) in read_normal_summaries() argument
4278 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in read_normal_summaries()
4292 if (__exist_node_summaries(sbi)) in read_normal_summaries()
4293 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type); in read_normal_summaries()
4295 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); in read_normal_summaries()
4301 if (__exist_node_summaries(sbi)) in read_normal_summaries()
4302 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, in read_normal_summaries()
4305 blk_addr = GET_SUM_BLOCK(sbi, segno); in read_normal_summaries()
4308 new = f2fs_get_meta_page(sbi, blk_addr); in read_normal_summaries()
4314 if (__exist_node_summaries(sbi)) { in read_normal_summaries()
4318 for (i = 0; i < BLKS_PER_SEG(sbi); i++, ns++) { in read_normal_summaries()
4323 err = f2fs_restore_node_summary(sbi, segno, sum); in read_normal_summaries()
4330 curseg = CURSEG_I(sbi, type); in read_normal_summaries()
4341 reset_curseg(sbi, type, 0); in read_normal_summaries()
4350 static int restore_curseg_summaries(struct f2fs_sb_info *sbi) in restore_curseg_summaries() argument
4352 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal; in restore_curseg_summaries()
4353 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal; in restore_curseg_summaries()
4357 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) { in restore_curseg_summaries()
4358 int npages = f2fs_npages_for_summary_flush(sbi, true); in restore_curseg_summaries()
4361 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages, in restore_curseg_summaries()
4365 err = read_compacted_summaries(sbi); in restore_curseg_summaries()
4371 if (__exist_node_summaries(sbi)) in restore_curseg_summaries()
4372 f2fs_ra_meta_pages(sbi, in restore_curseg_summaries()
4373 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type), in restore_curseg_summaries()
4377 err = read_normal_summaries(sbi, type); in restore_curseg_summaries()
4385 f2fs_err(sbi, "invalid journal entries nats %u sits %u", in restore_curseg_summaries()
4393 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) in write_compacted_summaries() argument
4402 page = f2fs_grab_meta_page(sbi, blkaddr++); in write_compacted_summaries()
4407 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); in write_compacted_summaries()
4412 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); in write_compacted_summaries()
4418 seg_i = CURSEG_I(sbi, i); in write_compacted_summaries()
4419 for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) { in write_compacted_summaries()
4421 page = f2fs_grab_meta_page(sbi, blkaddr++); in write_compacted_summaries()
4445 static void write_normal_summaries(struct f2fs_sb_info *sbi, in write_normal_summaries() argument
4456 write_current_sum_page(sbi, i, blkaddr + (i - type)); in write_normal_summaries()
4459 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) in f2fs_write_data_summaries() argument
4461 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) in f2fs_write_data_summaries()
4462 write_compacted_summaries(sbi, start_blk); in f2fs_write_data_summaries()
4464 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); in f2fs_write_data_summaries()
4467 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) in f2fs_write_node_summaries() argument
4469 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); in f2fs_write_node_summaries()
4494 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, in get_current_sit_page() argument
4497 return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno)); in get_current_sit_page()
4500 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, in get_next_sit_page() argument
4503 struct sit_info *sit_i = SIT_I(sbi); in get_next_sit_page()
4507 src_off = current_sit_addr(sbi, start); in get_next_sit_page()
4508 dst_off = next_sit_addr(sbi, src_off); in get_next_sit_page()
4510 page = f2fs_grab_meta_page(sbi, dst_off); in get_next_sit_page()
4511 seg_info_to_sit_page(sbi, page, start); in get_next_sit_page()
4573 static void add_sits_in_set(struct f2fs_sb_info *sbi) in add_sits_in_set() argument
4575 struct f2fs_sm_info *sm_info = SM_I(sbi); in add_sits_in_set()
4577 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; in add_sits_in_set()
4580 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) in add_sits_in_set()
4584 static void remove_sits_in_journal(struct f2fs_sb_info *sbi) in remove_sits_in_journal() argument
4586 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); in remove_sits_in_journal()
4596 dirtied = __mark_sit_entry_dirty(sbi, segno); in remove_sits_in_journal()
4599 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); in remove_sits_in_journal()
4609 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) in f2fs_flush_sit_entries() argument
4611 struct sit_info *sit_i = SIT_I(sbi); in f2fs_flush_sit_entries()
4613 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); in f2fs_flush_sit_entries()
4616 struct list_head *head = &SM_I(sbi)->sit_entry_set; in f2fs_flush_sit_entries()
4617 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS); in f2fs_flush_sit_entries()
4629 add_sits_in_set(sbi); in f2fs_flush_sit_entries()
4638 remove_sits_in_journal(sbi); in f2fs_flush_sit_entries()
4650 (unsigned long)MAIN_SEGS(sbi)); in f2fs_flush_sit_entries()
4660 page = get_next_sit_page(sbi, start_segno); in f2fs_flush_sit_entries()
4668 se = get_seg_entry(sbi, segno); in f2fs_flush_sit_entries()
4672 f2fs_bug_on(sbi, 1); in f2fs_flush_sit_entries()
4678 add_discard_addrs(sbi, cpc, false); in f2fs_flush_sit_entries()
4684 f2fs_bug_on(sbi, offset < 0); in f2fs_flush_sit_entries()
4689 check_block_count(sbi, segno, in f2fs_flush_sit_entries()
4695 check_block_count(sbi, segno, in f2fs_flush_sit_entries()
4709 f2fs_bug_on(sbi, ses->entry_cnt); in f2fs_flush_sit_entries()
4713 f2fs_bug_on(sbi, !list_empty(head)); in f2fs_flush_sit_entries()
4714 f2fs_bug_on(sbi, sit_i->dirty_sentries); in f2fs_flush_sit_entries()
4720 add_discard_addrs(sbi, cpc, false); in f2fs_flush_sit_entries()
4726 set_prefree_as_free_segments(sbi); in f2fs_flush_sit_entries()
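
f2fs_flush_sit_entries() batches dirty SIT entries per SIT block and picks a destination for each set: the cold-data curseg journal while it has room and no resize is in flight, otherwise the on-disk SIT block is rewritten via get_next_sit_page(). The decision, modeled as a predicate:

static int sits_to_journal(int journal_free_slots, int set_entry_cnt,
			   int resizefs_in_progress)
{
	return !resizefs_in_progress && journal_free_slots >= set_entry_cnt;
}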
4729 static int build_sit_info(struct f2fs_sb_info *sbi) in build_sit_info() argument
4731 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in build_sit_info()
4736 unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0; in build_sit_info()
4739 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL); in build_sit_info()
4743 SM_I(sbi)->sit_info = sit_i; in build_sit_info()
4746 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry), in build_sit_info()
4747 MAIN_SEGS(sbi)), in build_sit_info()
4752 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); in build_sit_info()
4753 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size, in build_sit_info()
4759 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map); in build_sit_info()
4761 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map); in build_sit_info()
4763 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); in build_sit_info()
4769 for (start = 0; start < MAIN_SEGS(sbi); start++) { in build_sit_info()
4787 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); in build_sit_info()
4791 if (__is_large_section(sbi)) { in build_sit_info()
4793 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry), in build_sit_info()
4794 MAIN_SECS(sbi)), in build_sit_info()
4804 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP); in build_sit_info()
4805 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); in build_sit_info()
4817 sit_i->invalid_segmap = f2fs_kvzalloc(sbi, in build_sit_info()
4824 sit_i->sit_blocks = SEGS_TO_BLKS(sbi, sit_segs); in build_sit_info()
4829 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); in build_sit_info()
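
The two bitmap_size lines in build_sit_info() encode the sizing rule: SIT_VBLOCK_MAP_SIZE bytes per segment per map, three maps under CONFIG_F2FS_CHECK_FS (the mirror copy) versus two without, plus an optional discard map. As one helper:

#define SIT_VBLOCK_MAP_SIZE 64UL	/* 512 blocks / 8 bits per byte */

static unsigned long sit_bitmap_bytes(unsigned long main_segs,
				      int check_fs, int block_unit_discard)
{
	unsigned long maps = (check_fs ? 3 : 2) + (block_unit_discard ? 1 : 0);

	return main_segs * SIT_VBLOCK_MAP_SIZE * maps;
}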
4835 static int build_free_segmap(struct f2fs_sb_info *sbi) in build_free_segmap() argument
4841 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL); in build_free_segmap()
4845 SM_I(sbi)->free_info = free_i; in build_free_segmap()
4847 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); in build_free_segmap()
4848 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL); in build_free_segmap()
4852 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); in build_free_segmap()
4853 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL); in build_free_segmap()
4862 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi)); in build_free_segmap()
4869 static int build_curseg(struct f2fs_sb_info *sbi) in build_curseg() argument
4874 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE, in build_curseg()
4879 SM_I(sbi)->curseg_array = array; in build_curseg()
4883 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL); in build_curseg()
4887 array[i].journal = f2fs_kzalloc(sbi, in build_curseg()
4894 return restore_curseg_summaries(sbi); in build_curseg()
4897 static int build_sit_entries(struct f2fs_sb_info *sbi) in build_sit_entries() argument
4899 struct sit_info *sit_i = SIT_I(sbi); in build_sit_entries()
4900 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); in build_sit_entries()
4904 int sit_blk_cnt = SIT_BLK_CNT(sbi); in build_sit_entries()
4911 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS, in build_sit_entries()
4917 for (; start < end && start < MAIN_SEGS(sbi); start++) { in build_sit_entries()
4922 page = get_current_sit_page(sbi, start); in build_sit_entries()
4929 err = check_block_count(sbi, start, &sit); in build_sit_entries()
4935 f2fs_err(sbi, "Invalid segment type: %u, segno: %u", in build_sit_entries()
4937 f2fs_handle_error(sbi, in build_sit_entries()
4944 if (!f2fs_block_unit_discard(sbi)) in build_sit_entries()
4948 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { in build_sit_entries()
4955 sbi->discard_blks += BLKS_PER_SEG(sbi) - in build_sit_entries()
4958 if (__is_large_section(sbi)) in build_sit_entries()
4959 get_sec_entry(sbi, start)->valid_blocks += in build_sit_entries()
4970 if (start >= MAIN_SEGS(sbi)) { in build_sit_entries()
4971 f2fs_err(sbi, "Wrong journal entry on segno %u", in build_sit_entries()
4974 f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL); in build_sit_entries()
4985 err = check_block_count(sbi, start, &sit); in build_sit_entries()
4991 f2fs_err(sbi, "Invalid segment type: %u, segno: %u", in build_sit_entries()
4994 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE); in build_sit_entries()
5000 if (f2fs_block_unit_discard(sbi)) { in build_sit_entries()
5001 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { in build_sit_entries()
5006 sbi->discard_blks += old_valid_blocks; in build_sit_entries()
5007 sbi->discard_blks -= se->valid_blocks; in build_sit_entries()
5011 if (__is_large_section(sbi)) { in build_sit_entries()
5012 get_sec_entry(sbi, start)->valid_blocks += in build_sit_entries()
5014 get_sec_entry(sbi, start)->valid_blocks -= in build_sit_entries()
5023 if (sit_valid_blocks[NODE] != valid_node_count(sbi)) { in build_sit_entries()
5024 f2fs_err(sbi, "SIT is corrupted node# %u vs %u", in build_sit_entries()
5025 sit_valid_blocks[NODE], valid_node_count(sbi)); in build_sit_entries()
5026 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT); in build_sit_entries()
5031 valid_user_blocks(sbi)) { in build_sit_entries()
5032 f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u", in build_sit_entries()
5034 valid_user_blocks(sbi)); in build_sit_entries()
5035 f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT); in build_sit_entries()
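
build_sit_entries() ends with the consistency checks matched above: the block totals accumulated while walking the SIT must agree with the checkpoint's counts, or the mount treats the image as corrupted. Modeled as a predicate:

static int sit_counts_consistent(unsigned sit_node_blocks,
				 unsigned sit_data_blocks,
				 unsigned ckpt_valid_node_count,
				 unsigned ckpt_valid_user_blocks)
{
	return sit_node_blocks == ckpt_valid_node_count &&
	       sit_node_blocks + sit_data_blocks <= ckpt_valid_user_blocks;
}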
5042 static void init_free_segmap(struct f2fs_sb_info *sbi) in init_free_segmap() argument
5048 for (start = 0; start < MAIN_SEGS(sbi); start++) { in init_free_segmap()
5049 if (f2fs_usable_blks_in_seg(sbi, start) == 0) in init_free_segmap()
5051 sentry = get_seg_entry(sbi, start); in init_free_segmap()
5053 __set_free(sbi, start); in init_free_segmap()
5055 SIT_I(sbi)->written_valid_blocks += in init_free_segmap()
5061 struct curseg_info *curseg_t = CURSEG_I(sbi, type); in init_free_segmap()
5063 __set_test_and_inuse(sbi, curseg_t->segno); in init_free_segmap()
5067 static void init_dirty_segmap(struct f2fs_sb_info *sbi) in init_dirty_segmap() argument
5069 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in init_dirty_segmap()
5070 struct free_segmap_info *free_i = FREE_I(sbi); in init_dirty_segmap()
5076 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); in init_dirty_segmap()
5077 if (segno >= MAIN_SEGS(sbi)) in init_dirty_segmap()
5080 valid_blocks = get_valid_blocks(sbi, segno, false); in init_dirty_segmap()
5081 usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); in init_dirty_segmap()
5085 f2fs_bug_on(sbi, 1); in init_dirty_segmap()
5089 __locate_dirty_segment(sbi, segno, DIRTY); in init_dirty_segmap()
5093 if (!__is_large_section(sbi)) in init_dirty_segmap()
5097 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { in init_dirty_segmap()
5098 valid_blocks = get_valid_blocks(sbi, segno, true); in init_dirty_segmap()
5099 secno = GET_SEC_FROM_SEG(sbi, segno); in init_dirty_segmap()
5101 if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi)) in init_dirty_segmap()
5103 if (IS_CURSEC(sbi, secno)) in init_dirty_segmap()
5110 static int init_victim_secmap(struct f2fs_sb_info *sbi) in init_victim_secmap() argument
5112 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in init_victim_secmap()
5113 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); in init_victim_secmap()
5115 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); in init_victim_secmap()
5119 dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); in init_victim_secmap()
5128 static int build_dirty_segmap(struct f2fs_sb_info *sbi) in build_dirty_segmap() argument
5134 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info), in build_dirty_segmap()
5139 SM_I(sbi)->dirty_info = dirty_i; in build_dirty_segmap()
5142 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); in build_dirty_segmap()
5145 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size, in build_dirty_segmap()
5151 if (__is_large_section(sbi)) { in build_dirty_segmap()
5152 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); in build_dirty_segmap()
5153 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi, in build_dirty_segmap()
5159 init_dirty_segmap(sbi); in build_dirty_segmap()
5160 return init_victim_secmap(sbi); in build_dirty_segmap()
5163 static int sanity_check_curseg(struct f2fs_sb_info *sbi) in sanity_check_curseg() argument
5172 struct curseg_info *curseg = CURSEG_I(sbi, i); in sanity_check_curseg()
5173 struct seg_entry *se = get_seg_entry(sbi, curseg->segno); in sanity_check_curseg()
5176 if (f2fs_sb_has_readonly(sbi) && in sanity_check_curseg()
5180 sanity_check_seg_type(sbi, curseg->seg_type); in sanity_check_curseg()
5183 f2fs_err(sbi, in sanity_check_curseg()
5186 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); in sanity_check_curseg()
5196 for (blkofs += 1; blkofs < BLKS_PER_SEG(sbi); blkofs++) { in sanity_check_curseg()
5200 f2fs_err(sbi, in sanity_check_curseg()
5204 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG); in sanity_check_curseg()
5212 static int check_zone_write_pointer(struct f2fs_sb_info *sbi, in check_zone_write_pointer() argument
5218 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT; in check_zone_write_pointer()
5226 zone_segno = GET_SEGNO(sbi, zone_block); in check_zone_write_pointer()
5232 if (zone_segno >= MAIN_SEGS(sbi)) in check_zone_write_pointer()
5238 valid_block_cnt = get_valid_blocks(sbi, zone_segno, true); in check_zone_write_pointer()
5239 if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) { in check_zone_write_pointer()
5240 f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]", in check_zone_write_pointer()
5251 f2fs_notice(sbi, "Zone without valid block has non-zero write " in check_zone_write_pointer()
5254 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block, in check_zone_write_pointer()
5257 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)", in check_zone_write_pointer()
5269 f2fs_notice(sbi, "Valid blocks are not aligned with write " in check_zone_write_pointer()
5282 f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)", in check_zone_write_pointer()
5285 f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)", in check_zone_write_pointer()
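
check_zone_write_pointer() repairs a zone the cheaper way: a zone with no valid blocks but a nonzero write pointer is discarded (reset), while one whose valid blocks disagree with the pointer is filled up and finished. A decision-table sketch; the real alignment test is more involved than the count comparison used here:

enum zone_fix { ZONE_OK, ZONE_RESET, ZONE_FINISH };

static enum zone_fix classify_zone(unsigned valid_blocks, unsigned wp_blkoff)
{
	if (!valid_blocks && wp_blkoff)
		return ZONE_RESET;	/* "Zone without valid block has non-zero write pointer" */
	if (valid_blocks && valid_blocks != wp_blkoff)
		return ZONE_FINISH;	/* "Valid blocks are not aligned with write pointer" */
	return ZONE_OK;
}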
5292 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi, in get_target_zoned_dev() argument
5297 for (i = 0; i < sbi->s_ndevs; i++) { in get_target_zoned_dev()
5300 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr && in get_target_zoned_dev()
5315 static int do_fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type) in do_fix_curseg_write_pointer() argument
5317 struct curseg_info *cs = CURSEG_I(sbi, type); in do_fix_curseg_write_pointer()
5322 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT; in do_fix_curseg_write_pointer()
5326 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno); in do_fix_curseg_write_pointer()
5327 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section)); in do_fix_curseg_write_pointer()
5329 zbd = get_target_zoned_dev(sbi, cs_zone_block); in do_fix_curseg_write_pointer()
5339 f2fs_err(sbi, "Report zone failed: %s errno=(%d)", in do_fix_curseg_write_pointer()
5351 if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { in do_fix_curseg_write_pointer()
5353 wp_segno = GET_SEGNO(sbi, wp_block); in do_fix_curseg_write_pointer()
5354 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno); in do_fix_curseg_write_pointer()
5361 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: " in do_fix_curseg_write_pointer()
5368 cs->segno != GET_SEG_FROM_SEC(sbi, GET_ZONE_FROM_SEC(sbi, cs_section))) { in do_fix_curseg_write_pointer()
5371 f2fs_allocate_new_section(sbi, type, true); in do_fix_curseg_write_pointer()
5372 f2fs_notice(sbi, "Assign new section to curseg[%d]: " in do_fix_curseg_write_pointer()
5379 if (check_zone_write_pointer(sbi, zbd, &zone)) in do_fix_curseg_write_pointer()
5383 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno); in do_fix_curseg_write_pointer()
5384 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section)); in do_fix_curseg_write_pointer()
5386 zbd = get_target_zoned_dev(sbi, cs_zone_block); in do_fix_curseg_write_pointer()
5395 f2fs_err(sbi, "Report zone failed: %s errno=(%d)", in do_fix_curseg_write_pointer()
5404 f2fs_notice(sbi, in do_fix_curseg_write_pointer()
5408 err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block, in do_fix_curseg_write_pointer()
5411 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)", in do_fix_curseg_write_pointer()
5420 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi) in fix_curseg_write_pointer() argument
5425 ret = do_fix_curseg_write_pointer(sbi, i); in fix_curseg_write_pointer()
5434 struct f2fs_sb_info *sbi; member
5445 return check_zone_write_pointer(args->sbi, args->fdev, zone); in check_zone_write_pointer_cb()
5448 static int check_write_pointer(struct f2fs_sb_info *sbi) in check_write_pointer() argument
5453 for (i = 0; i < sbi->s_ndevs; i++) { in check_write_pointer()
5457 args.sbi = sbi; in check_write_pointer()
5468 int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi) in f2fs_check_and_fix_write_pointer() argument
5472 if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb) || in f2fs_check_and_fix_write_pointer()
5473 f2fs_hw_is_readonly(sbi)) in f2fs_check_and_fix_write_pointer()
5476 f2fs_notice(sbi, "Checking entire write pointers"); in f2fs_check_and_fix_write_pointer()
5477 ret = fix_curseg_write_pointer(sbi); in f2fs_check_and_fix_write_pointer()
5479 ret = check_write_pointer(sbi); in f2fs_check_and_fix_write_pointer()
5492 struct f2fs_sb_info *sbi, unsigned int segno) in f2fs_usable_zone_blks_in_seg() argument
5497 if (!sbi->unusable_blocks_per_sec) in f2fs_usable_zone_blks_in_seg()
5498 return BLKS_PER_SEG(sbi); in f2fs_usable_zone_blks_in_seg()
5500 secno = GET_SEC_FROM_SEG(sbi, segno); in f2fs_usable_zone_blks_in_seg()
5501 seg_start = START_BLOCK(sbi, segno); in f2fs_usable_zone_blks_in_seg()
5502 sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno)); in f2fs_usable_zone_blks_in_seg()
5503 sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi); in f2fs_usable_zone_blks_in_seg()
5513 if (seg_start + BLKS_PER_SEG(sbi) > sec_cap_blkaddr) in f2fs_usable_zone_blks_in_seg()
5516 return BLKS_PER_SEG(sbi); in f2fs_usable_zone_blks_in_seg()
5519 int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi) in f2fs_check_and_fix_write_pointer() argument
5524 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi, in f2fs_usable_zone_blks_in_seg() argument
5531 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, in f2fs_usable_blks_in_seg() argument
5534 if (f2fs_sb_has_blkzoned(sbi)) in f2fs_usable_blks_in_seg()
5535 return f2fs_usable_zone_blks_in_seg(sbi, segno); in f2fs_usable_blks_in_seg()
5537 return BLKS_PER_SEG(sbi); in f2fs_usable_blks_in_seg()
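
f2fs_usable_zone_blks_in_seg() clamps a segment's usable blocks to the section's capped capacity on zoned devices; f2fs_usable_blks_in_seg() falls back to BLKS_PER_SEG otherwise. The clamp as a standalone helper:

static unsigned usable_blks_in_seg(unsigned seg_start_blkaddr,
				   unsigned blks_per_seg,
				   unsigned sec_cap_blkaddr)
{
	if (seg_start_blkaddr >= sec_cap_blkaddr)
		return 0;				/* entirely past the cap */
	if (seg_start_blkaddr + blks_per_seg > sec_cap_blkaddr)
		return sec_cap_blkaddr - seg_start_blkaddr; /* straddles the cap */
	return blks_per_seg;
}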
5540 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi) in f2fs_usable_segs_in_sec() argument
5542 if (f2fs_sb_has_blkzoned(sbi)) in f2fs_usable_segs_in_sec()
5543 return CAP_SEGS_PER_SEC(sbi); in f2fs_usable_segs_in_sec()
5545 return SEGS_PER_SEC(sbi); in f2fs_usable_segs_in_sec()
5548 unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi, in f2fs_get_section_mtime() argument
5551 unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi); in f2fs_get_section_mtime()
5557 secno = GET_SEC_FROM_SEG(sbi, segno); in f2fs_get_section_mtime()
5558 start = GET_SEG_FROM_SEC(sbi, secno); in f2fs_get_section_mtime()
5560 if (!__is_large_section(sbi)) { in f2fs_get_section_mtime()
5561 mtime = get_seg_entry(sbi, start + i)->mtime; in f2fs_get_section_mtime()
5567 struct seg_entry *se = get_seg_entry(sbi, start + i); in f2fs_get_section_mtime()
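
f2fs_get_section_mtime() derives a section mtime from its member segments; only part of the loop matches here, so this condensed model takes a plain average over segments that carry a timestamp. The kernel's exact weighting is not visible in the listing:

static unsigned long long section_mtime(const unsigned long long *seg_mtime,
					unsigned usable_segs_per_sec)
{
	unsigned long long total = 0;
	unsigned i, valid = 0;

	for (i = 0; i < usable_segs_per_sec; i++) {
		if (!seg_mtime[i])
			continue;	/* skip segments without a timestamp */
		total += seg_mtime[i];
		valid++;
	}
	return valid ? total / valid : 0;
}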
5586 static void init_min_max_mtime(struct f2fs_sb_info *sbi) in init_min_max_mtime() argument
5588 struct sit_info *sit_i = SIT_I(sbi); in init_min_max_mtime()
5595 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { in init_min_max_mtime()
5598 mtime = f2fs_get_section_mtime(sbi, segno); in init_min_max_mtime()
5603 sit_i->max_mtime = get_mtime(sbi, false); in init_min_max_mtime()
5608 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi) in f2fs_build_segment_manager() argument
5610 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_build_segment_manager()
5611 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in f2fs_build_segment_manager()
5615 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL); in f2fs_build_segment_manager()
5620 sbi->sm_info = sm_info; in f2fs_build_segment_manager()
5633 if (!f2fs_lfs_mode(sbi)) in f2fs_build_segment_manager()
5637 sm_info->min_seq_blocks = BLKS_PER_SEG(sbi); in f2fs_build_segment_manager()
5639 sm_info->min_ssr_sections = reserved_sections(sbi); in f2fs_build_segment_manager()
5645 err = f2fs_create_flush_cmd_control(sbi); in f2fs_build_segment_manager()
5649 err = create_discard_cmd_control(sbi); in f2fs_build_segment_manager()
5653 err = build_sit_info(sbi); in f2fs_build_segment_manager()
5656 err = build_free_segmap(sbi); in f2fs_build_segment_manager()
5659 err = build_curseg(sbi); in f2fs_build_segment_manager()
5664 err = build_sit_entries(sbi); in f2fs_build_segment_manager()
5668 init_free_segmap(sbi); in f2fs_build_segment_manager()
5669 err = build_dirty_segmap(sbi); in f2fs_build_segment_manager()
5673 err = sanity_check_curseg(sbi); in f2fs_build_segment_manager()
5677 init_min_max_mtime(sbi); in f2fs_build_segment_manager()
5681 static void discard_dirty_segmap(struct f2fs_sb_info *sbi, in discard_dirty_segmap() argument
5684 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in discard_dirty_segmap()
5692 static void destroy_victim_secmap(struct f2fs_sb_info *sbi) in destroy_victim_secmap() argument
5694 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in destroy_victim_secmap()
5700 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) in destroy_dirty_segmap() argument
5702 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in destroy_dirty_segmap()
5710 discard_dirty_segmap(sbi, i); in destroy_dirty_segmap()
5712 if (__is_large_section(sbi)) { in destroy_dirty_segmap()
5718 destroy_victim_secmap(sbi); in destroy_dirty_segmap()
5719 SM_I(sbi)->dirty_info = NULL; in destroy_dirty_segmap()
5723 static void destroy_curseg(struct f2fs_sb_info *sbi) in destroy_curseg() argument
5725 struct curseg_info *array = SM_I(sbi)->curseg_array; in destroy_curseg()
5730 SM_I(sbi)->curseg_array = NULL; in destroy_curseg()
5738 static void destroy_free_segmap(struct f2fs_sb_info *sbi) in destroy_free_segmap() argument
5740 struct free_segmap_info *free_i = SM_I(sbi)->free_info; in destroy_free_segmap()
5744 SM_I(sbi)->free_info = NULL; in destroy_free_segmap()
5750 static void destroy_sit_info(struct f2fs_sb_info *sbi) in destroy_sit_info() argument
5752 struct sit_info *sit_i = SIT_I(sbi); in destroy_sit_info()
5765 SM_I(sbi)->sit_info = NULL; in destroy_sit_info()
5774 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi) in f2fs_destroy_segment_manager() argument
5776 struct f2fs_sm_info *sm_info = SM_I(sbi); in f2fs_destroy_segment_manager()
5780 f2fs_destroy_flush_cmd_control(sbi, true); in f2fs_destroy_segment_manager()
5781 destroy_discard_cmd_control(sbi); in f2fs_destroy_segment_manager()
5782 destroy_dirty_segmap(sbi); in f2fs_destroy_segment_manager()
5783 destroy_curseg(sbi); in f2fs_destroy_segment_manager()
5784 destroy_free_segmap(sbi); in f2fs_destroy_segment_manager()
5785 destroy_sit_info(sbi); in f2fs_destroy_segment_manager()
5786 sbi->sm_info = NULL; in f2fs_destroy_segment_manager()
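
f2fs_build_segment_manager() initializes in dependency order (SIT info, free segmap, cursegs, SIT entries, dirty segmap), and f2fs_destroy_segment_manager() releases in roughly the reverse order, each destroy_* tolerating absent state. A shape-only sketch of that pairing, with stub steps:

#include <stdio.h>

static int  build_sit(void)       { puts("build sit");       return 0; }
static int  build_free(void)      { puts("build free");      return 0; }
static int  build_cursegs(void)   { puts("build cursegs");   return 0; }
static void destroy_sit(void)     { puts("destroy sit");     }
static void destroy_free(void)    { puts("destroy free");    }
static void destroy_cursegs(void) { puts("destroy cursegs"); }

int main(void)
{
	int (*build[])(void) = { build_sit, build_free, build_cursegs };
	void (*destroy[])(void) = { destroy_sit, destroy_free, destroy_cursegs };
	int i, built = 0;

	for (i = 0; i < 3; i++, built++)
		if (build[i]())
			break;		/* stop at the first failing step */
	while (built-- > 0)		/* tear down in reverse order */
		destroy[built]();
	return 0;
}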