Lines matching full word: ip

91 	struct xfs_inode	*ip;  in xfs_inode_alloc()  local
97 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL); in xfs_inode_alloc()
99 if (inode_init_always(mp->m_super, VFS_I(ip))) { in xfs_inode_alloc()
100 kmem_cache_free(xfs_inode_cache, ip); in xfs_inode_alloc()
105 VFS_I(ip)->i_mode = 0; in xfs_inode_alloc()
106 mapping_set_folio_min_order(VFS_I(ip)->i_mapping, in xfs_inode_alloc()
110 ASSERT(atomic_read(&ip->i_pincount) == 0); in xfs_inode_alloc()
111 ASSERT(ip->i_ino == 0); in xfs_inode_alloc()
114 ip->i_ino = ino; in xfs_inode_alloc()
115 ip->i_mount = mp; in xfs_inode_alloc()
116 memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); in xfs_inode_alloc()
117 ip->i_cowfp = NULL; in xfs_inode_alloc()
118 memset(&ip->i_af, 0, sizeof(ip->i_af)); in xfs_inode_alloc()
119 ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS; in xfs_inode_alloc()
120 memset(&ip->i_df, 0, sizeof(ip->i_df)); in xfs_inode_alloc()
121 ip->i_flags = 0; in xfs_inode_alloc()
122 ip->i_delayed_blks = 0; in xfs_inode_alloc()
123 ip->i_diflags2 = mp->m_ino_geo.new_diflags2; in xfs_inode_alloc()
124 ip->i_nblocks = 0; in xfs_inode_alloc()
125 ip->i_forkoff = 0; in xfs_inode_alloc()
126 ip->i_sick = 0; in xfs_inode_alloc()
127 ip->i_checked = 0; in xfs_inode_alloc()
128 INIT_WORK(&ip->i_ioend_work, xfs_end_io); in xfs_inode_alloc()
129 INIT_LIST_HEAD(&ip->i_ioend_list); in xfs_inode_alloc()
130 spin_lock_init(&ip->i_ioend_lock); in xfs_inode_alloc()
131 ip->i_next_unlinked = NULLAGINO; in xfs_inode_alloc()
132 ip->i_prev_unlinked = 0; in xfs_inode_alloc()
134 return ip; in xfs_inode_alloc()
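
How the xfs_inode_alloc() fragments above fit together, as a condensed, non-verbatim sketch: allocate from the xfs_inode_cache slab (which cannot fail thanks to __GFP_NOFAIL), run the generic VFS init, then reset every XFS-private field so nothing survives from the object's previous life in the slab. Lines that did not match "ip" are summarized in comments, and the long run of field resets (source lines 105-132 above) is abbreviated.

struct xfs_inode *
xfs_inode_alloc(struct xfs_mount *mp, xfs_ino_t ino)
{
    struct xfs_inode *ip;

    /* Slab allocation; __GFP_NOFAIL means this never returns NULL. */
    ip = alloc_inode_sb(mp->m_super, xfs_inode_cache,
            GFP_KERNEL | __GFP_NOFAIL);

    /* Generic VFS init; on failure, hand the object back to the slab. */
    if (inode_init_always(mp->m_super, VFS_I(ip))) {
        kmem_cache_free(xfs_inode_cache, ip);
        return NULL;
    }

    /*
     * A recycled slab object can still carry state from its previous
     * owner, so every XFS-private field is reset explicitly: inode
     * number and mount, the data/attr/CoW forks, flag words, health
     * counters, ioend machinery and unlinked-list pointers, i.e. the
     * assignments listed line by line above.
     */
    VFS_I(ip)->i_mode = 0;
    ip->i_ino = ino;
    ip->i_mount = mp;
    /* ... remaining field resets as listed above ... */

    return ip;
}
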
142 struct xfs_inode *ip = XFS_I(inode); in xfs_inode_free_callback() local
144 switch (VFS_I(ip)->i_mode & S_IFMT) { in xfs_inode_free_callback()
148 xfs_idestroy_fork(&ip->i_df); in xfs_inode_free_callback()
152 xfs_ifork_zap_attr(ip); in xfs_inode_free_callback()
154 if (ip->i_cowfp) { in xfs_inode_free_callback()
155 xfs_idestroy_fork(ip->i_cowfp); in xfs_inode_free_callback()
156 kmem_cache_free(xfs_ifork_cache, ip->i_cowfp); in xfs_inode_free_callback()
158 if (ip->i_itemp) { in xfs_inode_free_callback()
160 &ip->i_itemp->ili_item.li_flags)); in xfs_inode_free_callback()
161 xfs_inode_item_destroy(ip); in xfs_inode_free_callback()
162 ip->i_itemp = NULL; in xfs_inode_free_callback()
165 kmem_cache_free(xfs_inode_cache, ip); in xfs_inode_free_callback()
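
The lines above are from xfs_inode_free_callback(), the RCU callback that finally releases an inode. A condensed sketch follows; the container_of() conversion and the exact switch cases are reconstructed from the usual VFS pattern rather than from the matched lines, so read them as assumptions.

static void
xfs_inode_free_callback(struct rcu_head *head)
{
    struct inode *inode = container_of(head, struct inode, i_rcu);
    struct xfs_inode *ip = XFS_I(inode);

    /* Only regular files, directories and symlinks have a data fork
     * worth tearing down here. */
    switch (VFS_I(ip)->i_mode & S_IFMT) {
    case S_IFREG:
    case S_IFDIR:
    case S_IFLNK:
        xfs_idestroy_fork(&ip->i_df);
        break;
    }

    /* Attribute fork, then the CoW fork if one was ever allocated. */
    xfs_ifork_zap_attr(ip);
    if (ip->i_cowfp) {
        xfs_idestroy_fork(ip->i_cowfp);
        kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
    }

    /* The inode log item must already be off the AIL by now. */
    if (ip->i_itemp) {
        ASSERT(!test_bit(XFS_LI_IN_AIL,
                &ip->i_itemp->ili_item.li_flags));
        xfs_inode_item_destroy(ip);
        ip->i_itemp = NULL;
    }

    kmem_cache_free(xfs_inode_cache, ip);
}
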
170 struct xfs_inode *ip) in __xfs_inode_free() argument
173 ASSERT(atomic_read(&ip->i_pincount) == 0); in __xfs_inode_free()
174 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list)); in __xfs_inode_free()
175 XFS_STATS_DEC(ip->i_mount, vn_active); in __xfs_inode_free()
177 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); in __xfs_inode_free()
182 struct xfs_inode *ip) in xfs_inode_free() argument
184 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING)); in xfs_inode_free()
189 * free state. The ip->i_flags_lock provides the barrier against lookup in xfs_inode_free()
192 spin_lock(&ip->i_flags_lock); in xfs_inode_free()
193 ip->i_flags = XFS_IRECLAIM; in xfs_inode_free()
194 ip->i_ino = 0; in xfs_inode_free()
195 spin_unlock(&ip->i_flags_lock); in xfs_inode_free()
197 __xfs_inode_free(ip); in xfs_inode_free()
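
Source lines 184-197 above are the whole of xfs_inode_free(). The point of the fragment at line 189 is that ip->i_flags_lock acts as the barrier against a racing RCU lookup: the inode is made to look dead before __xfs_inode_free() queues it for RCU freeing. A minimal sketch:

static void
xfs_inode_free(struct xfs_inode *ip)
{
    ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

    /*
     * Once XFS_IRECLAIM is set and i_ino is zeroed under i_flags_lock,
     * a concurrent lookup that still finds this object in the radix
     * tree will skip it instead of resurrecting it.
     */
    spin_lock(&ip->i_flags_lock);
    ip->i_flags = XFS_IRECLAIM;
    ip->i_ino = 0;
    spin_unlock(&ip->i_flags_lock);

    __xfs_inode_free(ip);    /* call_rcu() on the VFS inode, see line 177 */
}
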
361 struct xfs_inode *ip) __releases(&ip->i_flags_lock) in xfs_iget_recycle() argument
363 struct xfs_mount *mp = ip->i_mount; in xfs_iget_recycle()
364 struct inode *inode = VFS_I(ip); in xfs_iget_recycle()
367 trace_xfs_iget_recycle(ip); in xfs_iget_recycle()
369 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) in xfs_iget_recycle()
378 ip->i_flags |= XFS_IRECLAIM; in xfs_iget_recycle()
380 spin_unlock(&ip->i_flags_lock); in xfs_iget_recycle()
385 xfs_iunlock(ip, XFS_ILOCK_EXCL); in xfs_iget_recycle()
392 spin_lock(&ip->i_flags_lock); in xfs_iget_recycle()
393 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); in xfs_iget_recycle()
394 ASSERT(ip->i_flags & XFS_IRECLAIMABLE); in xfs_iget_recycle()
395 spin_unlock(&ip->i_flags_lock); in xfs_iget_recycle()
398 trace_xfs_iget_recycle_fail(ip); in xfs_iget_recycle()
403 spin_lock(&ip->i_flags_lock); in xfs_iget_recycle()
410 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; in xfs_iget_recycle()
411 ip->i_flags |= XFS_INEW; in xfs_iget_recycle()
412 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_iget_recycle()
415 spin_unlock(&ip->i_flags_lock); in xfs_iget_recycle()
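
xfs_iget_recycle() resurrects an inode that a cache hit found in XFS_IRECLAIMABLE state. The sketch below compresses the flow around the matched lines; the re-initialization call in the middle, the RCU read-lock handling, the pag_ici_lock nesting and the VFS I_NEW state are reconstructed from context and should be treated as assumptions.

static int
xfs_iget_recycle(struct xfs_perag *pag, struct xfs_inode *ip)
    __releases(&ip->i_flags_lock)
{
    struct xfs_mount *mp = ip->i_mount;
    struct inode *inode = VFS_I(ip);
    int error;

    trace_xfs_iget_recycle(ip);

    if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
        return -EAGAIN;        /* caller backs off and retries the lookup */

    /* Claim the inode for recycling, then drop the spinlock so that the
     * re-initialization below is allowed to sleep. */
    ip->i_flags |= XFS_IRECLAIM;
    spin_unlock(&ip->i_flags_lock);

    /* Paraphrased: the original re-initializes the VFS inode here via a
     * wrapper that preserves a few fields. */
    error = inode_init_always(mp->m_super, inode);
    xfs_iunlock(ip, XFS_ILOCK_EXCL);
    if (error) {
        /* Back out; XFS_IRECLAIMABLE stays set so background reclaim
         * will pick this inode up again later. */
        spin_lock(&ip->i_flags_lock);
        ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
        ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
        spin_unlock(&ip->i_flags_lock);
        trace_xfs_iget_recycle_fail(ip);
        return error;
    }

    /* Success: clear the per-lifetime flags, mark the inode INEW and drop
     * its per-AG reclaim tag. */
    spin_lock(&ip->i_flags_lock);
    ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
    ip->i_flags |= XFS_INEW;
    xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
            XFS_ICI_RECLAIM_TAG);
    spin_unlock(&ip->i_flags_lock);

    return 0;
}
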
433 struct xfs_inode *ip, in xfs_iget_check_free_state() argument
438 if (VFS_I(ip)->i_mode != 0) { in xfs_iget_check_free_state()
439 xfs_warn(ip->i_mount, in xfs_iget_check_free_state()
441 ip->i_ino, VFS_I(ip)->i_mode); in xfs_iget_check_free_state()
442 xfs_agno_mark_sick(ip->i_mount, in xfs_iget_check_free_state()
443 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), in xfs_iget_check_free_state()
448 if (ip->i_nblocks != 0) { in xfs_iget_check_free_state()
449 xfs_warn(ip->i_mount, in xfs_iget_check_free_state()
451 ip->i_ino); in xfs_iget_check_free_state()
452 xfs_agno_mark_sick(ip->i_mount, in xfs_iget_check_free_state()
453 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), in xfs_iget_check_free_state()
461 if (VFS_I(ip)->i_mode == 0) in xfs_iget_check_free_state()
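
xfs_iget_check_free_state() sanity-checks the in-memory inode against what the caller expects. When allocating (XFS_IGET_CREATE) the cached inode must really look free: zero mode and zero block count, otherwise the AG is marked sick and the lookup fails as corruption; on an ordinary lookup a free inode is simply not a valid target. Sketch; the warning strings are paraphrased and the sick mask is an assumption.

static int
xfs_iget_check_free_state(struct xfs_inode *ip, int flags)
{
    if (flags & XFS_IGET_CREATE) {
        /* An inode being (re)allocated must look free in memory. */
        if (VFS_I(ip)->i_mode != 0) {
            xfs_warn(ip->i_mount,
                "free inode 0x%llx not marked free (mode 0x%x)", /* paraphrased */
                ip->i_ino, VFS_I(ip)->i_mode);
            xfs_agno_mark_sick(ip->i_mount,
                    XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                    XFS_SICK_AG_INOBT);    /* mask assumed */
            return -EFSCORRUPTED;
        }
        if (ip->i_nblocks != 0) {
            xfs_warn(ip->i_mount,
                "free inode 0x%llx has blocks allocated", /* paraphrased */
                ip->i_ino);
            xfs_agno_mark_sick(ip->i_mount,
                    XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                    XFS_SICK_AG_INOBT);    /* mask assumed */
            return -EFSCORRUPTED;
        }
        return 0;
    }

    /* Ordinary lookup: a free inode (mode == 0) is not a hit. */
    if (VFS_I(ip)->i_mode == 0)
        return -ENOENT;
    return 0;
}
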
514 struct xfs_inode *ip, in xfs_iget_cache_hit() argument
519 struct inode *inode = VFS_I(ip); in xfs_iget_cache_hit()
520 struct xfs_mount *mp = ip->i_mount; in xfs_iget_cache_hit()
530 spin_lock(&ip->i_flags_lock); in xfs_iget_cache_hit()
531 if (ip->i_ino != ino) in xfs_iget_cache_hit()
552 if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING)) in xfs_iget_cache_hit()
555 if (ip->i_flags & XFS_NEED_INACTIVE) { in xfs_iget_cache_hit()
557 if (VFS_I(ip)->i_nlink == 0) { in xfs_iget_cache_hit()
568 error = xfs_iget_check_free_state(ip, flags); in xfs_iget_cache_hit()
574 (ip->i_flags & XFS_IRECLAIMABLE)) in xfs_iget_cache_hit()
578 if (ip->i_flags & XFS_IRECLAIMABLE) { in xfs_iget_cache_hit()
580 error = xfs_iget_recycle(pag, ip); in xfs_iget_cache_hit()
591 spin_unlock(&ip->i_flags_lock); in xfs_iget_cache_hit()
593 trace_xfs_iget_hit(ip); in xfs_iget_cache_hit()
597 xfs_ilock(ip, lock_flags); in xfs_iget_cache_hit()
600 xfs_iflags_clear(ip, XFS_ISTALE); in xfs_iget_cache_hit()
606 trace_xfs_iget_skip(ip); in xfs_iget_cache_hit()
610 spin_unlock(&ip->i_flags_lock); in xfs_iget_cache_hit()
615 spin_unlock(&ip->i_flags_lock); in xfs_iget_cache_hit()
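
The matched lines from xfs_iget_cache_hit() trace its decision ladder, all taken under ip->i_flags_lock inside the caller's RCU read section. The snippet below is a rough reconstruction of just that ladder; igrab() of the live inode, the inodegc flush for XFS_NEED_INACTIVE inodes, the statistics and the rcu_read_unlock() ordering are left out or reduced to comments.

    spin_lock(&ip->i_flags_lock);
    if (ip->i_ino != ino)
        goto out_skip;        /* radix slot was reused; retry the lookup */

    if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
        goto out_skip;        /* inode is in flux; back off and retry */

    if (ip->i_flags & XFS_NEED_INACTIVE) {
        /* Queued for inactivation.  An unlinked inode cannot come back;
         * anything else waits for inodegc and retries (path omitted). */
        if (VFS_I(ip)->i_nlink == 0) {
            error = -ENOENT;
            goto out_error;
        }
        goto out_skip;
    }

    error = xfs_iget_check_free_state(ip, flags);
    if (error)
        goto out_error;

    if ((flags & XFS_IGET_INCORE) && (ip->i_flags & XFS_IRECLAIMABLE))
        goto out_skip;        /* INCORE lookups never recycle */

    if (ip->i_flags & XFS_IRECLAIMABLE) {
        /* Reclaimable but still cached: bring it back to life.  This
         * drops i_flags_lock; -EAGAIN retries the lookup. */
        error = xfs_iget_recycle(pag, ip);
        if (error)
            return error;
    } else {
        /* Live inode: igrab() it (omitted), then drop the spinlock. */
        spin_unlock(&ip->i_flags_lock);
    }

    trace_xfs_iget_hit(ip);
    if (lock_flags != 0)
        xfs_ilock(ip, lock_flags);
    if (!(flags & XFS_IGET_INCORE))
        xfs_iflags_clear(ip, XFS_ISTALE);
    return 0;

out_skip:
    trace_xfs_iget_skip(ip);
    error = -EAGAIN;
out_error:
    spin_unlock(&ip->i_flags_lock);
    return error;
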
636 struct xfs_inode *ip; in xfs_iget_cache_miss() local
640 ip = xfs_inode_alloc(mp, ino); in xfs_iget_cache_miss()
641 if (!ip) in xfs_iget_cache_miss()
644 error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags); in xfs_iget_cache_miss()
660 VFS_I(ip)->i_generation = get_random_u32(); in xfs_iget_cache_miss()
664 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp); in xfs_iget_cache_miss()
668 error = xfs_inode_from_disk(ip, in xfs_iget_cache_miss()
669 xfs_buf_offset(bp, ip->i_imap.im_boffset)); in xfs_iget_cache_miss()
673 xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); in xfs_iget_cache_miss()
680 trace_xfs_iget_miss(ip); in xfs_iget_cache_miss()
686 error = xfs_iget_check_free_state(ip, flags); in xfs_iget_cache_miss()
705 if (!xfs_ilock_nowait(ip, lock_flags)) in xfs_iget_cache_miss()
714 * The ip->i_flags_lock that protects the XFS_INEW flag forms the in xfs_iget_cache_miss()
719 d_mark_dontcache(VFS_I(ip)); in xfs_iget_cache_miss()
720 ip->i_udquot = NULL; in xfs_iget_cache_miss()
721 ip->i_gdquot = NULL; in xfs_iget_cache_miss()
722 ip->i_pdquot = NULL; in xfs_iget_cache_miss()
723 xfs_iflags_set(ip, XFS_INEW); in xfs_iget_cache_miss()
727 error = radix_tree_insert(&pag->pag_ici_root, agino, ip); in xfs_iget_cache_miss()
737 *ipp = ip; in xfs_iget_cache_miss()
744 xfs_iunlock(ip, lock_flags); in xfs_iget_cache_miss()
746 __destroy_inode(VFS_I(ip)); in xfs_iget_cache_miss()
747 xfs_inode_free(ip); in xfs_iget_cache_miss()
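
xfs_iget_cache_miss() constructs a new in-core inode when nothing is found in the per-AG radix tree. Condensed sketch; the radix-tree preload and pag_ici_lock, the buffer release after decoding, and the exact v5/IKEEP condition guarding the random generation number are compressed or reconstructed, so treat those parts as assumptions.

static int
xfs_iget_cache_miss(struct xfs_mount *mp, struct xfs_perag *pag,
        struct xfs_trans *tp, xfs_ino_t ino,
        struct xfs_inode **ipp, int flags, int lock_flags)
{
    struct xfs_inode *ip;
    struct xfs_buf *bp;
    xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
    int error;

    ip = xfs_inode_alloc(mp, ino);
    if (!ip)
        return -ENOMEM;

    /* Map the inode number to its on-disk cluster location. */
    error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
    if (error)
        goto out_destroy;

    if (flags & XFS_IGET_CREATE) {        /* condition simplified */
        /* Newly allocated inode: nothing on disk to read yet, just
         * pick a fresh generation number. */
        VFS_I(ip)->i_generation = get_random_u32();
    } else {
        /* Read the inode cluster buffer and decode the inode core. */
        error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
        if (error)
            goto out_destroy;
        error = xfs_inode_from_disk(ip,
                xfs_buf_offset(bp, ip->i_imap.im_boffset));
        /* the buffer is released again either way (omitted) */
        if (error) {
            xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
            goto out_destroy;
        }
    }

    trace_xfs_iget_miss(ip);

    error = xfs_iget_check_free_state(ip, flags);
    if (error)
        goto out_destroy;

    if (lock_flags && !xfs_ilock_nowait(ip, lock_flags)) {
        error = -EAGAIN;
        goto out_destroy;
    }

    /* XFS_INEW, protected by i_flags_lock, keeps other lookups away
     * until setup completes; dquots start out detached. */
    d_mark_dontcache(VFS_I(ip));
    ip->i_udquot = NULL;
    ip->i_gdquot = NULL;
    ip->i_pdquot = NULL;
    xfs_iflags_set(ip, XFS_INEW);

    /* Insert into the per-AG inode cache (preload and pag_ici_lock
     * omitted); a duplicate means another lookup won the race. */
    if (radix_tree_insert(&pag->pag_ici_root, agino, ip)) {
        error = -EAGAIN;    /* retry the lookup */
        goto out_unlock;
    }

    *ipp = ip;
    return 0;

out_unlock:
    if (lock_flags)
        xfs_iunlock(ip, lock_flags);
out_destroy:
    __destroy_inode(VFS_I(ip));
    xfs_inode_free(ip);
    return error;
}
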
772 struct xfs_inode *ip; in xfs_iget() local
792 ip = radix_tree_lookup(&pag->pag_ici_root, agino); in xfs_iget()
794 if (ip) { in xfs_iget()
795 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); in xfs_iget()
806 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, in xfs_iget()
813 *ipp = ip; in xfs_iget()
820 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) in xfs_iget()
821 xfs_setup_existing_inode(ip); in xfs_iget()
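
These lines are the core of xfs_iget(): look the inode number up in the per-AG radix tree and dispatch to the cache-hit or cache-miss path, then finish VFS setup for a freshly constructed or recycled inode. The snippet below shows only that core; input validation, the RCU read lock around the lookup, perag get/put and the -EAGAIN back-off/retry loop are omitted, so the plain "return error" lines stand in for that retry logic.

    /* inside xfs_iget(), with agino = XFS_INO_TO_AGINO(mp, ino) */
    ip = radix_tree_lookup(&pag->pag_ici_root, agino);

    if (ip) {
        error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
        if (error)
            return error;        /* -EAGAIN delays and retries in the original */
    } else {
        if (flags & XFS_IGET_INCORE)
            return -ENODATA;     /* in-core-only lookup: no miss path */
        error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                flags, lock_flags);
        if (error)
            return error;        /* likewise retried on -EAGAIN */
    }

    *ipp = ip;

    /*
     * A newly constructed or recycled inode (XFS_INEW) that already has a
     * valid mode gets the rest of its VFS setup done exactly once.
     */
    if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
        xfs_setup_existing_inode(ip);
    return 0;
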
848 struct xfs_inode *ip; in xfs_trans_metafile_iget() local
852 error = xfs_iget(mp, tp, ino, 0, 0, &ip); in xfs_trans_metafile_iget()
858 if (VFS_I(ip)->i_nlink == 0) in xfs_trans_metafile_iget()
865 if (inode_wrong_type(VFS_I(ip), mode)) in xfs_trans_metafile_iget()
868 if (!xfs_is_metadir_inode(ip)) in xfs_trans_metafile_iget()
870 if (metafile_type != ip->i_metatype) in xfs_trans_metafile_iget()
874 *ipp = ip; in xfs_trans_metafile_iget()
877 xfs_irele(ip); in xfs_trans_metafile_iget()
924 struct xfs_inode *ip, in xfs_reclaim_igrab() argument
929 spin_lock(&ip->i_flags_lock); in xfs_reclaim_igrab()
930 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || in xfs_reclaim_igrab()
931 __xfs_iflags_test(ip, XFS_IRECLAIM)) { in xfs_reclaim_igrab()
933 spin_unlock(&ip->i_flags_lock); in xfs_reclaim_igrab()
938 if (ip->i_sick && in xfs_reclaim_igrab()
940 spin_unlock(&ip->i_flags_lock); in xfs_reclaim_igrab()
944 __xfs_iflags_set(ip, XFS_IRECLAIM); in xfs_reclaim_igrab()
945 spin_unlock(&ip->i_flags_lock); in xfs_reclaim_igrab()
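
xfs_reclaim_igrab() claims an inode for reclaim with a single test-and-set under i_flags_lock, so only one reclaimer ever processes it: the inode must be XFS_IRECLAIMABLE and not already XFS_IRECLAIM, and sick inodes are skipped unless the walk explicitly asked to reclaim them. Sketch; the RECLAIM_SICK flag name is filled in from context.

static bool
xfs_reclaim_igrab(struct xfs_inode *ip, struct xfs_icwalk *icw)
{
    ASSERT(rcu_read_lock_held());

    spin_lock(&ip->i_flags_lock);
    if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
        __xfs_iflags_test(ip, XFS_IRECLAIM)) {
        /* not reclaimable, or another reclaimer already owns it */
        spin_unlock(&ip->i_flags_lock);
        return false;
    }

    /* Don't reclaim a sick inode unless the caller asked us to. */
    if (ip->i_sick &&
        (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
        spin_unlock(&ip->i_flags_lock);
        return false;
    }

    __xfs_iflags_set(ip, XFS_IRECLAIM);
    spin_unlock(&ip->i_flags_lock);
    return true;
}
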
963 struct xfs_inode *ip, in xfs_reclaim_inode() argument
966 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ in xfs_reclaim_inode()
968 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) in xfs_reclaim_inode()
970 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING)) in xfs_reclaim_inode()
980 if (xlog_is_shutdown(ip->i_mount->m_log)) { in xfs_reclaim_inode()
981 xfs_iunpin_wait(ip); in xfs_reclaim_inode()
982 xfs_iflush_shutdown_abort(ip); in xfs_reclaim_inode()
985 if (xfs_ipincount(ip)) in xfs_reclaim_inode()
987 if (!xfs_inode_clean(ip)) in xfs_reclaim_inode()
990 xfs_iflags_clear(ip, XFS_IFLUSHING); in xfs_reclaim_inode()
992 trace_xfs_inode_reclaiming(ip); in xfs_reclaim_inode()
1004 spin_lock(&ip->i_flags_lock); in xfs_reclaim_inode()
1005 ip->i_flags = XFS_IRECLAIM; in xfs_reclaim_inode()
1006 ip->i_ino = 0; in xfs_reclaim_inode()
1007 ip->i_sick = 0; in xfs_reclaim_inode()
1008 ip->i_checked = 0; in xfs_reclaim_inode()
1009 spin_unlock(&ip->i_flags_lock); in xfs_reclaim_inode()
1011 ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL); in xfs_reclaim_inode()
1012 xfs_iunlock(ip, XFS_ILOCK_EXCL); in xfs_reclaim_inode()
1014 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); in xfs_reclaim_inode()
1024 XFS_INO_TO_AGINO(ip->i_mount, ino))) in xfs_reclaim_inode()
1037 xfs_ilock(ip, XFS_ILOCK_EXCL); in xfs_reclaim_inode()
1038 ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot); in xfs_reclaim_inode()
1039 xfs_iunlock(ip, XFS_ILOCK_EXCL); in xfs_reclaim_inode()
1040 ASSERT(xfs_inode_clean(ip)); in xfs_reclaim_inode()
1042 __xfs_inode_free(ip); in xfs_reclaim_inode()
1046 xfs_iflags_clear(ip, XFS_IFLUSHING); in xfs_reclaim_inode()
1048 xfs_iunlock(ip, XFS_ILOCK_EXCL); in xfs_reclaim_inode()
1050 xfs_iflags_clear(ip, XFS_IRECLAIM); in xfs_reclaim_inode()
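
xfs_reclaim_inode() does the actual teardown for an inode grabbed above. The sketch compresses the matched lines into one flow: take the ILOCK and the IFLUSHING flag without blocking, give up on pinned or dirty inodes, and for a clean one make it look dead under i_flags_lock (zeroing i_ino is the same lookup barrier as in xfs_inode_free()), delete it from the per-AG radix tree and free it via RCU. Radix tree and perag locking, the reclaim-tag bookkeeping and the shutdown details are reduced to comments.

static void
xfs_reclaim_inode(struct xfs_inode *ip, struct xfs_perag *pag)
{
    xfs_ino_t ino = ip->i_ino;    /* saved for radix_tree_delete() */

    if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
        goto out;
    if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
        goto out_iunlock;

    /* With the log shut down, abort any dirty state instead of flushing. */
    if (xlog_is_shutdown(ip->i_mount->m_log)) {
        xfs_iunpin_wait(ip);
        xfs_iflush_shutdown_abort(ip);
        goto reclaim;
    }
    if (xfs_ipincount(ip))
        goto out_clear_flush;    /* still pinned in the log; try later */
    if (!xfs_inode_clean(ip))
        goto out_clear_flush;    /* dirty; try again later */

    xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
    trace_xfs_inode_reclaiming(ip);

    /* Make the inode look dead to RCU lookups before tearing it down. */
    spin_lock(&ip->i_flags_lock);
    ip->i_flags = XFS_IRECLAIM;
    ip->i_ino = 0;
    ip->i_sick = 0;
    ip->i_checked = 0;
    spin_unlock(&ip->i_flags_lock);

    ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
    xfs_iunlock(ip, XFS_ILOCK_EXCL);

    XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);

    /* Delete from the per-AG radix tree using the saved inode number;
     * reclaim-tag and pag_ici_lock handling omitted. */
    if (!radix_tree_delete(&pag->pag_ici_root,
                XFS_INO_TO_AGINO(ip->i_mount, ino)))
        ASSERT(0);

    /* Dquots must already be gone; then free through call_rcu(). */
    xfs_ilock(ip, XFS_ILOCK_EXCL);
    ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
    xfs_iunlock(ip, XFS_ILOCK_EXCL);
    ASSERT(xfs_inode_clean(ip));

    __xfs_inode_free(ip);
    return;

out_clear_flush:
    xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
    xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
    xfs_iflags_clear(ip, XFS_IRECLAIM);
}
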
1131 struct xfs_inode *ip, in xfs_icwalk_match_id() argument
1135 !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid)) in xfs_icwalk_match_id()
1139 !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid)) in xfs_icwalk_match_id()
1143 ip->i_projid != icw->icw_prid) in xfs_icwalk_match_id()
1155 struct xfs_inode *ip, in xfs_icwalk_match_id_union() argument
1159 uid_eq(VFS_I(ip)->i_uid, icw->icw_uid)) in xfs_icwalk_match_id_union()
1163 gid_eq(VFS_I(ip)->i_gid, icw->icw_gid)) in xfs_icwalk_match_id_union()
1167 ip->i_projid == icw->icw_prid) in xfs_icwalk_match_id_union()
1174 * Is this inode @ip eligible for eof/cow block reclamation, given some
1180 struct xfs_inode *ip, in xfs_icwalk_match() argument
1189 match = xfs_icwalk_match_id_union(ip, icw); in xfs_icwalk_match()
1191 match = xfs_icwalk_match_id(ip, icw); in xfs_icwalk_match()
1197 XFS_ISIZE(ip) < icw->icw_min_file_size) in xfs_icwalk_match()
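
The three matchers above filter inodes during a cache walk by ownership. xfs_icwalk_match_id() requires every requested id (uid, gid, project id) to match; xfs_icwalk_match_id_union() is satisfied by any one of them; xfs_icwalk_match() chooses between the two and then applies the minimum-file-size filter. Sketch; the XFS_ICWALK_FLAG_* names follow the file's naming convention and should be treated as assumptions.

static bool
xfs_icwalk_match_id(struct xfs_inode *ip, struct xfs_icwalk *icw)
{
    /* every requested id has to match */
    if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
        !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
        return false;
    if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
        !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
        return false;
    if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
        ip->i_projid != icw->icw_prid)
        return false;
    return true;
}

static bool
xfs_icwalk_match_id_union(struct xfs_inode *ip, struct xfs_icwalk *icw)
{
    /* any one matching id is enough */
    if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
        uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
        return true;
    if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
        gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
        return true;
    if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
        ip->i_projid == icw->icw_prid)
        return true;
    return false;
}

static bool
xfs_icwalk_match(struct xfs_inode *ip, struct xfs_icwalk *icw)
{
    bool match;

    if (!icw)
        return true;

    if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
        match = xfs_icwalk_match_id_union(ip, icw);
    else
        match = xfs_icwalk_match_id(ip, icw);
    if (!match)
        return false;

    /* skip files smaller than the requested minimum size */
    if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
        XFS_ISIZE(ip) < icw->icw_min_file_size)
        return false;

    return true;
}
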
1222 struct xfs_inode *ip, in xfs_inode_free_eofblocks() argument
1230 if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS)) in xfs_inode_free_eofblocks()
1237 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) in xfs_inode_free_eofblocks()
1240 if (!xfs_icwalk_match(ip, icw)) in xfs_inode_free_eofblocks()
1247 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { in xfs_inode_free_eofblocks()
1254 if (xfs_can_free_eofblocks(ip)) in xfs_inode_free_eofblocks()
1255 return xfs_free_eofblocks(ip); in xfs_inode_free_eofblocks()
1258 trace_xfs_inode_free_eofblocks_invalid(ip); in xfs_inode_free_eofblocks()
1259 xfs_inode_clear_eofblocks_tag(ip); in xfs_inode_free_eofblocks()
1265 struct xfs_inode *ip, in xfs_blockgc_set_iflag() argument
1268 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_set_iflag()
1277 if (ip->i_flags & iflag) in xfs_blockgc_set_iflag()
1279 spin_lock(&ip->i_flags_lock); in xfs_blockgc_set_iflag()
1280 ip->i_flags |= iflag; in xfs_blockgc_set_iflag()
1281 spin_unlock(&ip->i_flags_lock); in xfs_blockgc_set_iflag()
1283 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_set_iflag()
1286 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_set_iflag()
1295 xfs_inode_t *ip) in xfs_inode_set_eofblocks_tag() argument
1297 trace_xfs_inode_set_eofblocks_tag(ip); in xfs_inode_set_eofblocks_tag()
1298 return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS); in xfs_inode_set_eofblocks_tag()
1303 struct xfs_inode *ip, in xfs_blockgc_clear_iflag() argument
1306 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_clear_iflag()
1312 spin_lock(&ip->i_flags_lock); in xfs_blockgc_clear_iflag()
1313 ip->i_flags &= ~iflag; in xfs_blockgc_clear_iflag()
1314 clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0; in xfs_blockgc_clear_iflag()
1315 spin_unlock(&ip->i_flags_lock); in xfs_blockgc_clear_iflag()
1320 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_clear_iflag()
1323 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_clear_iflag()
1332 xfs_inode_t *ip) in xfs_inode_clear_eofblocks_tag() argument
1334 trace_xfs_inode_clear_eofblocks_tag(ip); in xfs_inode_clear_eofblocks_tag()
1335 return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS); in xfs_inode_clear_eofblocks_tag()
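
The set/clear pair above implements the block-GC tagging scheme: setting XFS_IEOFBLOCKS or XFS_ICOWBLOCKS flips the in-core flag under i_flags_lock and tags the inode in the per-AG radix tree so the background worker can find it; clearing drops the radix tag only once both block-GC flags are gone. Sketch of both helpers; the XFS_ICI_BLOCKGC_TAG name and the pag_ici_lock nesting are filled in from context.

static void
xfs_blockgc_set_iflag(struct xfs_inode *ip, unsigned long iflag)
{
    struct xfs_mount *mp = ip->i_mount;
    struct xfs_perag *pag;

    ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

    /* Already flagged?  Skip the perag lookup entirely. */
    if (ip->i_flags & iflag)
        return;
    spin_lock(&ip->i_flags_lock);
    ip->i_flags |= iflag;
    spin_unlock(&ip->i_flags_lock);

    pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
    spin_lock(&pag->pag_ici_lock);
    xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
            XFS_ICI_BLOCKGC_TAG);
    spin_unlock(&pag->pag_ici_lock);
    xfs_perag_put(pag);
}

static void
xfs_blockgc_clear_iflag(struct xfs_inode *ip, unsigned long iflag)
{
    struct xfs_mount *mp = ip->i_mount;
    struct xfs_perag *pag;
    bool clear_tag;

    spin_lock(&ip->i_flags_lock);
    ip->i_flags &= ~iflag;
    /* Only drop the radix tag when neither GC flag remains set. */
    clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
    spin_unlock(&ip->i_flags_lock);

    if (!clear_tag)
        return;

    pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
    spin_lock(&pag->pag_ici_lock);
    xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
            XFS_ICI_BLOCKGC_TAG);
    spin_unlock(&pag->pag_ici_lock);
    xfs_perag_put(pag);
}
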
1343 struct xfs_inode *ip, in xfs_prep_free_cowblocks() argument
1354 if (!xfs_inode_has_cow_data(ip)) { in xfs_prep_free_cowblocks()
1355 trace_xfs_inode_free_cowblocks_invalid(ip); in xfs_prep_free_cowblocks()
1356 xfs_inode_clear_cowblocks_tag(ip); in xfs_prep_free_cowblocks()
1374 if (!sync && inode_is_open_for_write(VFS_I(ip))) in xfs_prep_free_cowblocks()
1376 return xfs_can_free_cowblocks(ip); in xfs_prep_free_cowblocks()
1393 struct xfs_inode *ip, in xfs_inode_free_cowblocks() argument
1402 if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS)) in xfs_inode_free_cowblocks()
1405 if (!xfs_prep_free_cowblocks(ip, icw)) in xfs_inode_free_cowblocks()
1408 if (!xfs_icwalk_match(ip, icw)) in xfs_inode_free_cowblocks()
1416 !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { in xfs_inode_free_cowblocks()
1423 if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) { in xfs_inode_free_cowblocks()
1434 if (xfs_prep_free_cowblocks(ip, icw)) in xfs_inode_free_cowblocks()
1435 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); in xfs_inode_free_cowblocks()
1441 xfs_inode_t *ip) in xfs_inode_set_cowblocks_tag() argument
1443 trace_xfs_inode_set_cowblocks_tag(ip); in xfs_inode_set_cowblocks_tag()
1444 return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS); in xfs_inode_set_cowblocks_tag()
1449 xfs_inode_t *ip) in xfs_inode_clear_cowblocks_tag() argument
1451 trace_xfs_inode_clear_cowblocks_tag(ip); in xfs_inode_clear_cowblocks_tag()
1452 return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS); in xfs_inode_clear_cowblocks_tag()
1492 * Decide if the given @ip is eligible for garbage collection of speculative
1498 struct xfs_inode *ip) in xfs_blockgc_igrab() argument
1500 struct inode *inode = VFS_I(ip); in xfs_blockgc_igrab()
1505 spin_lock(&ip->i_flags_lock); in xfs_blockgc_igrab()
1506 if (!ip->i_ino) in xfs_blockgc_igrab()
1509 if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS) in xfs_blockgc_igrab()
1511 spin_unlock(&ip->i_flags_lock); in xfs_blockgc_igrab()
1514 if (xfs_is_shutdown(ip->i_mount)) in xfs_blockgc_igrab()
1525 spin_unlock(&ip->i_flags_lock); in xfs_blockgc_igrab()
1532 struct xfs_inode *ip, in xfs_blockgc_scan_inode() argument
1538 error = xfs_inode_free_eofblocks(ip, icw, &lockflags); in xfs_blockgc_scan_inode()
1542 error = xfs_inode_free_cowblocks(ip, icw, &lockflags); in xfs_blockgc_scan_inode()
1545 xfs_iunlock(ip, lockflags); in xfs_blockgc_scan_inode()
1546 xfs_irele(ip); in xfs_blockgc_scan_inode()
1671 struct xfs_inode *ip, in xfs_blockgc_free_quota() argument
1674 return xfs_blockgc_free_dquots(ip->i_mount, in xfs_blockgc_free_quota()
1675 xfs_inode_dquot(ip, XFS_DQTYPE_USER), in xfs_blockgc_free_quota()
1676 xfs_inode_dquot(ip, XFS_DQTYPE_GROUP), in xfs_blockgc_free_quota()
1677 xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags); in xfs_blockgc_free_quota()
1698 struct xfs_inode *ip, in xfs_icwalk_igrab() argument
1703 return xfs_blockgc_igrab(ip); in xfs_icwalk_igrab()
1705 return xfs_reclaim_igrab(ip, icw); in xfs_icwalk_igrab()
1718 struct xfs_inode *ip, in xfs_icwalk_process_inode() argument
1726 error = xfs_blockgc_scan_inode(ip, icw); in xfs_icwalk_process_inode()
1729 xfs_reclaim_inode(ip, pag); in xfs_icwalk_process_inode()
1781 struct xfs_inode *ip = batch[i]; in xfs_icwalk_ag() local
1783 if (done || !xfs_icwalk_igrab(goal, ip, icw)) in xfs_icwalk_ag()
1798 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag)) in xfs_icwalk_ag()
1800 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); in xfs_icwalk_ag()
1801 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) in xfs_icwalk_ag()
1875 struct xfs_inode *ip, in xfs_check_delalloc() argument
1878 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); in xfs_check_delalloc()
1882 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got)) in xfs_check_delalloc()
1886 xfs_warn(ip->i_mount, in xfs_check_delalloc()
1888 ip->i_ino, in xfs_check_delalloc()
1895 #define xfs_check_delalloc(ip, whichfork) do { } while (0) argument
1901 struct xfs_inode *ip) in xfs_inodegc_set_reclaimable() argument
1903 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_set_reclaimable()
1906 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) { in xfs_inodegc_set_reclaimable()
1907 xfs_check_delalloc(ip, XFS_DATA_FORK); in xfs_inodegc_set_reclaimable()
1908 xfs_check_delalloc(ip, XFS_COW_FORK); in xfs_inodegc_set_reclaimable()
1912 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_inodegc_set_reclaimable()
1914 spin_lock(&ip->i_flags_lock); in xfs_inodegc_set_reclaimable()
1916 trace_xfs_inode_set_reclaimable(ip); in xfs_inodegc_set_reclaimable()
1917 ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING); in xfs_inodegc_set_reclaimable()
1918 ip->i_flags |= XFS_IRECLAIMABLE; in xfs_inodegc_set_reclaimable()
1919 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_inodegc_set_reclaimable()
1922 spin_unlock(&ip->i_flags_lock); in xfs_inodegc_set_reclaimable()
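
xfs_inodegc_set_reclaimable() is the hand-off from inactivation to reclaim: the NEED_INACTIVE/INACTIVATING flags are replaced by IRECLAIMABLE and the inode gets the per-AG reclaim tag so the background reclaim walk can find it. On a live filesystem, leftover delalloc blocks at this point indicate a leak and are reported first. Sketch; the pag_ici_lock nesting and the XFS_ICI_RECLAIM_TAG name are filled in from context.

static void
xfs_inodegc_set_reclaimable(struct xfs_inode *ip)
{
    struct xfs_mount *mp = ip->i_mount;
    struct xfs_perag *pag;

    /* Leftover delalloc extents on a healthy fs mean we leaked them. */
    if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
        xfs_check_delalloc(ip, XFS_DATA_FORK);
        xfs_check_delalloc(ip, XFS_COW_FORK);
        ASSERT(0);
    }

    pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
    spin_lock(&pag->pag_ici_lock);
    spin_lock(&ip->i_flags_lock);

    trace_xfs_inode_set_reclaimable(ip);
    ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
    ip->i_flags |= XFS_IRECLAIMABLE;
    xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
            XFS_ICI_RECLAIM_TAG);

    spin_unlock(&ip->i_flags_lock);
    spin_unlock(&pag->pag_ici_lock);
    xfs_perag_put(pag);
}
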
1934 struct xfs_inode *ip) in xfs_inodegc_inactivate() argument
1938 trace_xfs_inode_inactivating(ip); in xfs_inodegc_inactivate()
1939 error = xfs_inactive(ip); in xfs_inodegc_inactivate()
1940 xfs_inodegc_set_reclaimable(ip); in xfs_inodegc_inactivate()
1952 struct xfs_inode *ip, *n; in xfs_inodegc_worker() local
1977 ip = llist_entry(node, struct xfs_inode, i_gclist); in xfs_inodegc_worker()
1981 llist_for_each_entry_safe(ip, n, node, i_gclist) { in xfs_inodegc_worker()
1984 xfs_iflags_set(ip, XFS_INACTIVATING); in xfs_inodegc_worker()
1985 error = xfs_inodegc_inactivate(ip); in xfs_inodegc_worker()
2072 struct xfs_inode *ip) in xfs_inodegc_want_queue_rt_file() argument
2074 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_rt_file()
2076 if (!XFS_IS_REALTIME_INODE(ip)) in xfs_inodegc_want_queue_rt_file()
2087 # define xfs_inodegc_want_queue_rt_file(ip) (false) argument
2099 struct xfs_inode *ip, in xfs_inodegc_want_queue_work() argument
2102 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_work()
2112 if (xfs_inodegc_want_queue_rt_file(ip)) in xfs_inodegc_want_queue_work()
2115 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER)) in xfs_inodegc_want_queue_work()
2118 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP)) in xfs_inodegc_want_queue_work()
2121 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ)) in xfs_inodegc_want_queue_work()
2146 struct xfs_inode *ip, in xfs_inodegc_want_flush_work() argument
2169 struct xfs_inode *ip) in xfs_inodegc_queue() argument
2171 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_queue()
2178 trace_xfs_inode_set_need_inactive(ip); in xfs_inodegc_queue()
2179 spin_lock(&ip->i_flags_lock); in xfs_inodegc_queue()
2180 ip->i_flags |= XFS_NEED_INACTIVE; in xfs_inodegc_queue()
2181 spin_unlock(&ip->i_flags_lock); in xfs_inodegc_queue()
2185 llist_add(&ip->i_gclist, &gc->list); in xfs_inodegc_queue()
2208 if (xfs_inodegc_want_queue_work(ip, items)) in xfs_inodegc_queue()
2216 if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) { in xfs_inodegc_queue()
2234 struct xfs_inode *ip) in xfs_inode_mark_reclaimable() argument
2236 struct xfs_mount *mp = ip->i_mount; in xfs_inode_mark_reclaimable()
2244 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS)); in xfs_inode_mark_reclaimable()
2246 need_inactive = xfs_inode_needs_inactive(ip); in xfs_inode_mark_reclaimable()
2248 xfs_inodegc_queue(ip); in xfs_inode_mark_reclaimable()
2253 xfs_qm_dqdetach(ip); in xfs_inode_mark_reclaimable()
2254 xfs_inodegc_set_reclaimable(ip); in xfs_inode_mark_reclaimable()
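
xfs_inode_mark_reclaimable() is the entry point from the VFS when the last reference to an inode goes away: if the inode still needs inactivation work (for example freeing an unlinked inode or trimming post-EOF blocks) it is queued to the per-CPU inodegc lists; otherwise the dquots are detached and the inode goes straight to the reclaimable state. Sketch; the statistics counter name is an assumption.

void
xfs_inode_mark_reclaimable(struct xfs_inode *ip)
{
    struct xfs_mount *mp = ip->i_mount;
    bool need_inactive;

    XFS_STATS_INC(mp, vn_reclaim);    /* counter name assumed */

    /* None of the reclaim/inactivation flags may be set yet. */
    ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));

    need_inactive = xfs_inode_needs_inactive(ip);
    if (need_inactive) {
        /* Defer the transactional inactivation work to inodegc. */
        xfs_inodegc_queue(ip);
        return;
    }

    /* Nothing to inactivate: drop the dquots and let reclaim take it. */
    xfs_qm_dqdetach(ip);
    xfs_inodegc_set_reclaimable(ip);
}
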