1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (c) 2018 Red Hat, Inc.
4 */
5 #ifndef __LIBXFS_GROUP_H
6 #define __LIBXFS_GROUP_H 1
7
/*
 * In-memory object representing one allocation group (of any type) of a
 * mounted filesystem.  Instances live in the mount's per-type xarray
 * (m_groups[xg_type].xa) indexed by xg_gno.
 */
struct xfs_group {
	struct xfs_mount	*xg_mount;	/* backpointer to the mount */
	uint32_t		xg_gno;		/* group number (xarray index) */
	enum xfs_group_type	xg_type;	/* selects m_groups[] geometry */
	atomic_t		xg_ref;		/* passive reference count */
	atomic_t		xg_active_ref;	/* active reference count */

	/* Precalculated geometry info */
	uint32_t		xg_block_count;	/* max usable gbno */
	uint32_t		xg_min_gbno;	/* min usable gbno */

#ifdef __KERNEL__
	/* -- kernel only structures below this line -- */

	/*
	 * Track freed but not yet committed extents.
	 */
	struct xfs_extent_busy_tree *xg_busy_extents;

	/*
	 * Bitsets of per-ag metadata that have been checked and/or are sick.
	 * Callers should hold xg_state_lock before accessing this field.
	 */
	uint16_t		xg_checked;
	uint16_t		xg_sick;
	spinlock_t		xg_state_lock;

	/*
	 * We use xfs_drain to track the number of deferred log intent items
	 * that have been queued (but not yet processed) so that waiters (e.g.
	 * scrub) will not lock resources when other threads are in the middle
	 * of processing a chain of intent items only to find momentary
	 * inconsistencies.
	 */
	struct xfs_defer_drain	xg_intents_drain;

	/*
	 * Hook to feed rmapbt updates to an active online repair.
	 */
	struct xfs_hooks	xg_rmap_update_hooks;
#endif /* __KERNEL__ */
};
50
51 struct xfs_group *xfs_group_get(struct xfs_mount *mp, uint32_t index,
52 enum xfs_group_type type);
53 struct xfs_group *xfs_group_get_by_fsb(struct xfs_mount *mp,
54 xfs_fsblock_t fsbno, enum xfs_group_type type);
55 struct xfs_group *xfs_group_hold(struct xfs_group *xg);
56 void xfs_group_put(struct xfs_group *xg);
57
58 struct xfs_group *xfs_group_grab(struct xfs_mount *mp, uint32_t index,
59 enum xfs_group_type type);
60 struct xfs_group *xfs_group_next_range(struct xfs_mount *mp,
61 struct xfs_group *xg, uint32_t start_index, uint32_t end_index,
62 enum xfs_group_type type);
63 struct xfs_group *xfs_group_grab_next_mark(struct xfs_mount *mp,
64 struct xfs_group *xg, xa_mark_t mark, enum xfs_group_type type);
65 void xfs_group_rele(struct xfs_group *xg);
66
67 void xfs_group_free(struct xfs_mount *mp, uint32_t index,
68 enum xfs_group_type type, void (*uninit)(struct xfs_group *xg));
69 int xfs_group_insert(struct xfs_mount *mp, struct xfs_group *xg,
70 uint32_t index, enum xfs_group_type);
71
/*
 * Set, clear, or test an xarray mark on a group's slot in the mount's
 * per-type group xarray (m_groups[type].xa), keyed by group number.
 * These are macros so they work for any xa_mark_t without pulling in
 * the full xarray machinery here.
 */
#define xfs_group_set_mark(_xg, _mark) \
	xa_set_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \
			(_xg)->xg_gno, (_mark))
#define xfs_group_clear_mark(_xg, _mark) \
	xa_clear_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \
			(_xg)->xg_gno, (_mark))
/* Returns true if any group of @_type in @_mp has @_mark set. */
#define xfs_group_marked(_mp, _type, _mark) \
	xa_marked(&(_mp)->m_groups[(_type)].xa, (_mark))
80
81 static inline xfs_agblock_t
xfs_group_max_blocks(struct xfs_group * xg)82 xfs_group_max_blocks(
83 struct xfs_group *xg)
84 {
85 return xg->xg_mount->m_groups[xg->xg_type].blocks;
86 }
87
88 static inline xfs_fsblock_t
xfs_group_start_fsb(struct xfs_group * xg)89 xfs_group_start_fsb(
90 struct xfs_group *xg)
91 {
92 return ((xfs_fsblock_t)xg->xg_gno) <<
93 xg->xg_mount->m_groups[xg->xg_type].blklog;
94 }
95
96 static inline xfs_fsblock_t
xfs_gbno_to_fsb(struct xfs_group * xg,xfs_agblock_t gbno)97 xfs_gbno_to_fsb(
98 struct xfs_group *xg,
99 xfs_agblock_t gbno)
100 {
101 return xfs_group_start_fsb(xg) | gbno;
102 }
103
104 static inline xfs_daddr_t
xfs_gbno_to_daddr(struct xfs_group * xg,xfs_agblock_t gbno)105 xfs_gbno_to_daddr(
106 struct xfs_group *xg,
107 xfs_agblock_t gbno)
108 {
109 struct xfs_mount *mp = xg->xg_mount;
110 uint32_t blocks = mp->m_groups[xg->xg_type].blocks;
111
112 return XFS_FSB_TO_BB(mp, (xfs_fsblock_t)xg->xg_gno * blocks + gbno);
113 }
114
115 static inline uint32_t
xfs_fsb_to_gno(struct xfs_mount * mp,xfs_fsblock_t fsbno,enum xfs_group_type type)116 xfs_fsb_to_gno(
117 struct xfs_mount *mp,
118 xfs_fsblock_t fsbno,
119 enum xfs_group_type type)
120 {
121 if (!mp->m_groups[type].blklog)
122 return 0;
123 return fsbno >> mp->m_groups[type].blklog;
124 }
125
126 static inline xfs_agblock_t
xfs_fsb_to_gbno(struct xfs_mount * mp,xfs_fsblock_t fsbno,enum xfs_group_type type)127 xfs_fsb_to_gbno(
128 struct xfs_mount *mp,
129 xfs_fsblock_t fsbno,
130 enum xfs_group_type type)
131 {
132 return fsbno & mp->m_groups[type].blkmask;
133 }
134
135 static inline bool
xfs_verify_gbno(struct xfs_group * xg,uint32_t gbno)136 xfs_verify_gbno(
137 struct xfs_group *xg,
138 uint32_t gbno)
139 {
140 if (gbno >= xg->xg_block_count)
141 return false;
142 if (gbno < xg->xg_min_gbno)
143 return false;
144 return true;
145 }
146
147 static inline bool
xfs_verify_gbext(struct xfs_group * xg,uint32_t gbno,uint32_t glen)148 xfs_verify_gbext(
149 struct xfs_group *xg,
150 uint32_t gbno,
151 uint32_t glen)
152 {
153 uint32_t end;
154
155 if (!xfs_verify_gbno(xg, gbno))
156 return false;
157 if (glen == 0 || check_add_overflow(gbno, glen - 1, &end))
158 return false;
159 if (!xfs_verify_gbno(xg, end))
160 return false;
161 return true;
162 }
163
164 #endif /* __LIBXFS_GROUP_H */
165