Lines matching full:sbi — references to the identifier sbi (struct f2fs_sb_info *) in fs/f2fs/debug.c
32 void f2fs_update_sit_info(struct f2fs_sb_info *sbi) in f2fs_update_sit_info() argument
34 struct f2fs_stat_info *si = F2FS_STAT(sbi); in f2fs_update_sit_info()
42 blks_per_sec = CAP_BLKS_PER_SEC(sbi); in f2fs_update_sit_info()
44 for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) { in f2fs_update_sit_info()
45 vblocks = get_valid_blocks(sbi, segno, true); in f2fs_update_sit_info()
54 dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100); in f2fs_update_sit_info()
63 static void update_multidevice_stats(struct f2fs_sb_info *sbi) in update_multidevice_stats() argument
65 struct f2fs_stat_info *si = F2FS_STAT(sbi); in update_multidevice_stats()
69 if (!f2fs_is_multi_device(sbi)) in update_multidevice_stats()
72 memset(dev_stats, 0, sizeof(struct f2fs_dev_stats) * sbi->s_ndevs); in update_multidevice_stats()
73 for (i = 0; i < sbi->s_ndevs; i++) { in update_multidevice_stats()
78 start_blk = MAIN_BLKADDR(sbi); in update_multidevice_stats()
79 end_blk = FDEV(i).end_blk + 1 - SEG0_BLKADDR(sbi); in update_multidevice_stats()
85 start_segno = GET_SEGNO(sbi, start_blk); in update_multidevice_stats()
86 end_segno = GET_SEGNO(sbi, end_blk); in update_multidevice_stats()
91 seg_blks = get_seg_entry(sbi, j)->valid_blocks; in update_multidevice_stats()
94 if (IS_CURSEG(sbi, j)) in update_multidevice_stats()
96 else if (seg_blks == BLKS_PER_SEG(sbi)) in update_multidevice_stats()
100 else if (!test_bit(j, FREE_I(sbi)->free_segmap)) in update_multidevice_stats()
105 if (!__is_large_section(sbi) || in update_multidevice_stats()
106 (j % SEGS_PER_SEC(sbi)) != 0) in update_multidevice_stats()
109 sec_blks = get_sec_entry(sbi, j)->valid_blocks; in update_multidevice_stats()
112 if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, j))) in update_multidevice_stats()
114 else if (sec_blks == BLKS_PER_SEC(sbi)) in update_multidevice_stats()
118 else if (!test_bit(GET_SEC_FROM_SEG(sbi, j), in update_multidevice_stats()
119 FREE_I(sbi)->free_secmap)) in update_multidevice_stats()
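The matches above span update_multidevice_stats(), which walks each device's range of main-area segments (and, for large-section layouts, sections) and classifies every one of them for the per-device statistics. The listing shows only the conditions, not the counters they feed, so the sketch below uses hypothetical counter names (in_use, full, dirty, free, prefree) purely to illustrate the classification order the matched branches imply; it is not the kernel code.

        /* Sketch only, assuming hypothetical counters; the f2fs_dev_stats
         * fields actually updated are not visible in this listing, and the
         * "partially valid" branch is inferred from the gap between the
         * matched lines 96 and 100. */
        static void classify_segment(struct f2fs_sb_info *sbi, unsigned int j,
                                     struct seg_counters *c /* hypothetical */)
        {
                unsigned int seg_blks = get_seg_entry(sbi, j)->valid_blocks;

                if (IS_CURSEG(sbi, j))                  /* holds an open log */
                        c->in_use++;
                else if (seg_blks == BLKS_PER_SEG(sbi)) /* every block valid */
                        c->full++;
                else if (seg_blks)                      /* partially valid */
                        c->dirty++;
                else if (!test_bit(j, FREE_I(sbi)->free_segmap))
                        c->free++;                      /* zero valid, marked free */
                else
                        c->prefree++;                   /* zero valid, not yet freed */
        }

The later matches in the same function (get_sec_entry(), BLKS_PER_SEC(), free_secmap) show the same ordering applied once per section when __is_large_section(sbi) holds.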
127 static void update_general_status(struct f2fs_sb_info *sbi) in update_general_status() argument
129 struct f2fs_stat_info *si = F2FS_STAT(sbi); in update_general_status()
130 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in update_general_status()
141 struct extent_tree_info *eti = &sbi->extent_tree[i]; in update_general_status()
143 si->hit_cached[i] = atomic64_read(&sbi->read_hit_cached[i]); in update_general_status()
144 si->hit_rbtree[i] = atomic64_read(&sbi->read_hit_rbtree[i]); in update_general_status()
145 si->total_ext[i] = atomic64_read(&sbi->total_hit_ext[i]); in update_general_status()
152 si->hit_largest = atomic64_read(&sbi->read_hit_largest); in update_general_status()
156 si->allocated_data_blocks = atomic64_read(&sbi->allocated_data_blocks); in update_general_status()
159 si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES); in update_general_status()
160 si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS); in update_general_status()
161 si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META); in update_general_status()
162 si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA); in update_general_status()
163 si->ndirty_qdata = get_pages(sbi, F2FS_DIRTY_QDATA); in update_general_status()
164 si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA); in update_general_status()
165 si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE]; in update_general_status()
166 si->ndirty_files = sbi->ndirty_inode[FILE_INODE]; in update_general_status()
167 si->nquota_files = sbi->nquota_files; in update_general_status()
168 si->ndirty_all = sbi->ndirty_inode[DIRTY_META]; in update_general_status()
169 si->aw_cnt = atomic_read(&sbi->atomic_files); in update_general_status()
170 si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt); in update_general_status()
171 si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ); in update_general_status()
172 si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE); in update_general_status()
173 si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA); in update_general_status()
174 si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA); in update_general_status()
175 si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA); in update_general_status()
176 si->nr_rd_node = get_pages(sbi, F2FS_RD_NODE); in update_general_status()
177 si->nr_rd_meta = get_pages(sbi, F2FS_RD_META); in update_general_status()
178 if (SM_I(sbi)->fcc_info) { in update_general_status()
180 atomic_read(&SM_I(sbi)->fcc_info->issued_flush); in update_general_status()
182 atomic_read(&SM_I(sbi)->fcc_info->queued_flush); in update_general_status()
184 llist_empty(&SM_I(sbi)->fcc_info->issue_list); in update_general_status()
186 if (SM_I(sbi)->dcc_info) { in update_general_status()
188 atomic_read(&SM_I(sbi)->dcc_info->issued_discard); in update_general_status()
190 atomic_read(&SM_I(sbi)->dcc_info->queued_discard); in update_general_status()
192 atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt); in update_general_status()
193 si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks; in update_general_status()
195 si->nr_issued_ckpt = atomic_read(&sbi->cprc_info.issued_ckpt); in update_general_status()
196 si->nr_total_ckpt = atomic_read(&sbi->cprc_info.total_ckpt); in update_general_status()
197 si->nr_queued_ckpt = atomic_read(&sbi->cprc_info.queued_ckpt); in update_general_status()
198 spin_lock(&sbi->cprc_info.stat_lock); in update_general_status()
199 si->cur_ckpt_time = sbi->cprc_info.cur_time; in update_general_status()
200 si->peak_ckpt_time = sbi->cprc_info.peak_time; in update_general_status()
201 spin_unlock(&sbi->cprc_info.stat_lock); in update_general_status()
202 si->total_count = BLKS_TO_SEGS(sbi, (int)sbi->user_block_count); in update_general_status()
203 si->rsvd_segs = reserved_segments(sbi); in update_general_status()
204 si->overp_segs = overprovision_segments(sbi); in update_general_status()
205 si->valid_count = valid_user_blocks(sbi); in update_general_status()
206 si->discard_blks = discard_blocks(sbi); in update_general_status()
207 si->valid_node_count = valid_node_count(sbi); in update_general_status()
208 si->valid_inode_count = valid_inode_count(sbi); in update_general_status()
209 si->inline_xattr = atomic_read(&sbi->inline_xattr); in update_general_status()
210 si->inline_inode = atomic_read(&sbi->inline_inode); in update_general_status()
211 si->inline_dir = atomic_read(&sbi->inline_dir); in update_general_status()
212 si->compr_inode = atomic_read(&sbi->compr_inode); in update_general_status()
213 si->swapfile_inode = atomic_read(&sbi->swapfile_inode); in update_general_status()
214 si->compr_blocks = atomic64_read(&sbi->compr_blocks); in update_general_status()
215 si->append = sbi->im[APPEND_INO].ino_num; in update_general_status()
216 si->update = sbi->im[UPDATE_INO].ino_num; in update_general_status()
217 si->orphans = sbi->im[ORPHAN_INO].ino_num; in update_general_status()
218 si->utilization = utilization(sbi); in update_general_status()
220 si->free_segs = free_segments(sbi); in update_general_status()
221 si->free_secs = free_sections(sbi); in update_general_status()
222 si->prefree_count = prefree_segments(sbi); in update_general_status()
223 si->dirty_count = dirty_segments(sbi); in update_general_status()
224 if (sbi->node_inode) in update_general_status()
225 si->node_pages = NODE_MAPPING(sbi)->nrpages; in update_general_status()
226 if (sbi->meta_inode) in update_general_status()
227 si->meta_pages = META_MAPPING(sbi)->nrpages; in update_general_status()
229 if (sbi->compress_inode) { in update_general_status()
230 si->compress_pages = COMPRESS_MAPPING(sbi)->nrpages; in update_general_status()
231 si->compress_page_hit = atomic_read(&sbi->compress_page_hit); in update_general_status()
234 si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT]; in update_general_status()
235 si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT]; in update_general_status()
236 si->sits = MAIN_SEGS(sbi); in update_general_status()
237 si->dirty_sits = SIT_I(sbi)->dirty_sentries; in update_general_status()
238 si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID]; in update_general_status()
239 si->avail_nids = NM_I(sbi)->available_nids; in update_general_status()
240 si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID]; in update_general_status()
241 si->io_skip_bggc = sbi->io_skip_bggc; in update_general_status()
242 si->other_skip_bggc = sbi->other_skip_bggc; in update_general_status()
243 si->util_free = (int)(BLKS_TO_SEGS(sbi, free_user_blocks(sbi))) in update_general_status()
244 * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg) in update_general_status()
246 si->util_valid = (int)(BLKS_TO_SEGS(sbi, written_block_count(sbi))) in update_general_status()
247 * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg) in update_general_status()
251 struct curseg_info *curseg = CURSEG_I(sbi, i); in update_general_status()
254 si->cursec[i] = GET_SEC_FROM_SEG(sbi, curseg->segno); in update_general_status()
255 si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]); in update_general_status()
259 si->meta_count[i] = atomic_read(&sbi->meta_count[i]); in update_general_status()
267 for (i = 0; i < MAIN_SEGS(sbi); i++) { in update_general_status()
268 int blks = get_seg_entry(sbi, i)->valid_blocks; in update_general_status()
269 int type = get_seg_entry(sbi, i)->type; in update_general_status()
274 if (blks == BLKS_PER_SEG(sbi)) in update_general_status()
281 update_multidevice_stats(sbi); in update_general_status()
284 si->cp_call_count[i] = atomic_read(&sbi->cp_call_count[i]); in update_general_status()
287 si->segment_count[i] = sbi->segment_count[i]; in update_general_status()
288 si->block_count[i] = sbi->block_count[i]; in update_general_status()
291 si->inplace_count = atomic_read(&sbi->inplace_count); in update_general_status()
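The util_free and util_valid matches above (source lines 243-247) are plain integer percentage arithmetic: a block count is converted to segments and divided by the total number of user segments. A self-contained sketch of that arithmetic with made-up numbers, assuming BLKS_TO_SEGS() reduces to a right shift by log_blocks_per_seg (consistent with the user_block_count >> sbi->log_blocks_per_seg term on the same lines):

        #include <stdio.h>

        int main(void)
        {
                /* Hypothetical values, only to show the integer arithmetic. */
                unsigned long long user_blocks = 1ULL << 20; /* total user blocks        */
                unsigned long long free_blocks = 300000;     /* free user blocks         */
                unsigned int log_blocks_per_seg = 9;         /* 512 x 4KiB blocks = 2MiB */

                unsigned int total_segs = user_blocks >> log_blocks_per_seg; /* 2048 */
                unsigned int free_segs  = free_blocks >> log_blocks_per_seg; /* 585  */

                printf("util_free = %d%%\n",
                       (int)free_segs * 100 / (int)total_segs); /* prints 28% */
                return 0;
        }

Both figures round down because the division is integer division.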
297 static void update_mem_info(struct f2fs_sb_info *sbi) in update_mem_info() argument
299 struct f2fs_stat_info *si = F2FS_STAT(sbi); in update_mem_info()
309 si->base_mem += sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize; in update_mem_info()
311 si->base_mem += sizeof(*sbi->ckpt); in update_mem_info()
318 si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry); in update_mem_info()
319 si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi)); in update_mem_info()
320 si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi); in update_mem_info()
321 si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi); in update_mem_info()
323 if (__is_large_section(sbi)) in update_mem_info()
324 si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry); in update_mem_info()
325 si->base_mem += __bitmap_size(sbi, SIT_BITMAP); in update_mem_info()
329 si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi)); in update_mem_info()
330 si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi)); in update_mem_info()
338 si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(MAIN_SEGS(sbi)); in update_mem_info()
339 si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi)); in update_mem_info()
343 si->base_mem += __bitmap_size(sbi, NAT_BITMAP); in update_mem_info()
344 si->base_mem += F2FS_BLK_TO_BYTES(NM_I(sbi)->nat_bits_blocks); in update_mem_info()
345 si->base_mem += NM_I(sbi)->nat_blocks * in update_mem_info()
347 si->base_mem += NM_I(sbi)->nat_blocks / 8; in update_mem_info()
348 si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short); in update_mem_info()
354 if (sbi->gc_thread) in update_mem_info()
358 if (SM_I(sbi)->fcc_info) in update_mem_info()
360 if (SM_I(sbi)->dcc_info) { in update_mem_info()
363 atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt); in update_mem_info()
367 si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] + in update_mem_info()
368 NM_I(sbi)->nid_cnt[PREALLOC_NID]) * in update_mem_info()
370 si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] * in update_mem_info()
372 si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] * in update_mem_info()
375 si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); in update_mem_info()
378 struct extent_tree_info *eti = &sbi->extent_tree[i]; in update_mem_info()
388 if (sbi->node_inode) { in update_mem_info()
389 unsigned long npages = NODE_MAPPING(sbi)->nrpages; in update_mem_info()
393 if (sbi->meta_inode) { in update_mem_info()
394 unsigned long npages = META_MAPPING(sbi)->nrpages; in update_mem_info()
399 if (sbi->compress_inode) { in update_mem_info()
400 unsigned long npages = COMPRESS_MAPPING(sbi)->nrpages; in update_mem_info()
445 struct f2fs_sb_info *sbi = si->sbi; in stat_show() local
447 update_general_status(sbi); in stat_show()
450 sbi->sb->s_bdev, i++, in stat_show()
451 f2fs_readonly(sbi->sb) ? "RO" : "RW", in stat_show()
452 is_set_ckpt_flags(sbi, CP_DISABLED_FLAG) ? in stat_show()
453 "Disabled" : (f2fs_cp_error(sbi) ? "Error" : "Good")); in stat_show()
454 if (sbi->s_flag) { in stat_show()
455 seq_puts(s, "[SBI:"); in stat_show()
456 for_each_set_bit(j, &sbi->s_flag, MAX_SBI_FLAG) in stat_show()
468 SIT_I(sbi)->mounted_time); in stat_show()
472 if (IS_F2FS_IPU_DISABLE(sbi)) { in stat_show()
475 unsigned long policy = SM_I(sbi)->ipu_policy; in stat_show()
482 if (test_opt(sbi, DISCARD)) in stat_show()
567 if (f2fs_is_multi_device(sbi)) { in stat_show()
571 if (__is_large_section(sbi)) in stat_show()
577 for (i = 0; i < sbi->s_ndevs; i++) { in stat_show()
584 if (!__is_large_section(sbi)) { in stat_show()
618 if (__is_large_section(sbi)) { in stat_show()
633 seq_printf(s, " - Normal : %d\n", sbi->gc_reclaimed_segs[GC_NORMAL]); in stat_show()
634 seq_printf(s, " - Idle CB : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_CB]); in stat_show()
636 sbi->gc_reclaimed_segs[GC_IDLE_GREEDY]); in stat_show()
637 seq_printf(s, " - Idle AT : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_AT]); in stat_show()
639 sbi->gc_reclaimed_segs[GC_URGENT_HIGH]); in stat_show()
640 seq_printf(s, " - Urgent Mid : %d\n", sbi->gc_reclaimed_segs[GC_URGENT_MID]); in stat_show()
641 seq_printf(s, " - Urgent Low : %d\n", sbi->gc_reclaimed_segs[GC_URGENT_LOW]); in stat_show()
707 &sbi->rf_node_block_count)); in stat_show()
734 f2fs_update_sit_info(sbi); in stat_show()
739 update_mem_info(sbi); in stat_show()
760 int f2fs_build_stats(struct f2fs_sb_info *sbi) in f2fs_build_stats() argument
762 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_build_stats()
768 si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL); in f2fs_build_stats()
772 dev_stats = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_stats) * in f2fs_build_stats()
773 sbi->s_ndevs, GFP_KERNEL); in f2fs_build_stats()
789 si->sbi = sbi; in f2fs_build_stats()
790 sbi->stat_info = si; in f2fs_build_stats()
794 atomic64_set(&sbi->total_hit_ext[i], 0); in f2fs_build_stats()
795 atomic64_set(&sbi->read_hit_rbtree[i], 0); in f2fs_build_stats()
796 atomic64_set(&sbi->read_hit_cached[i], 0); in f2fs_build_stats()
800 atomic64_set(&sbi->read_hit_largest, 0); in f2fs_build_stats()
802 atomic_set(&sbi->inline_xattr, 0); in f2fs_build_stats()
803 atomic_set(&sbi->inline_inode, 0); in f2fs_build_stats()
804 atomic_set(&sbi->inline_dir, 0); in f2fs_build_stats()
805 atomic_set(&sbi->compr_inode, 0); in f2fs_build_stats()
806 atomic64_set(&sbi->compr_blocks, 0); in f2fs_build_stats()
807 atomic_set(&sbi->swapfile_inode, 0); in f2fs_build_stats()
808 atomic_set(&sbi->atomic_files, 0); in f2fs_build_stats()
809 atomic_set(&sbi->inplace_count, 0); in f2fs_build_stats()
811 atomic_set(&sbi->meta_count[i], 0); in f2fs_build_stats()
813 atomic_set(&sbi->cp_call_count[i], 0); in f2fs_build_stats()
815 atomic_set(&sbi->max_aw_cnt, 0); in f2fs_build_stats()
824 void f2fs_destroy_stats(struct f2fs_sb_info *sbi) in f2fs_destroy_stats() argument
826 struct f2fs_stat_info *si = F2FS_STAT(sbi); in f2fs_destroy_stats()
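Everything in this listing belongs to the f2fs statistics code: f2fs_build_stats() and f2fs_destroy_stats() allocate and tear down the per-superblock f2fs_stat_info, the update_* helpers snapshot the counters, and stat_show() formats them for debugfs. On kernels built with CONFIG_F2FS_STAT_FS and with debugfs mounted in the usual place, the result is readable at /sys/kernel/debug/f2fs/status. A minimal userspace sketch that dumps it (path assumed as above):

        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                /* Default debugfs location of the file rendered by stat_show();
                 * adjust the path if debugfs is mounted elsewhere. */
                const char *path = "/sys/kernel/debug/f2fs/status";
                char buf[4096];
                size_t n;
                FILE *f = fopen(path, "r");

                if (!f) {
                        perror(path);
                        return EXIT_FAILURE;
                }
                while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
                        fwrite(buf, 1, n, stdout);
                fclose(f);
                return 0;
        }

Each read of that file goes through stat_show(), which calls update_general_status(), f2fs_update_sit_info() and update_mem_info() (all visible in the matches above), so the numbers are recomputed on every read.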