// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"
#include "xfs_health.h"
#include "xfs_metadir.h"
#include "xfs_metafile.h"

int
xfs_calc_dquots_per_chunk(
        unsigned int            nbblks) /* basic block units */
{
        ASSERT(nbblks > 0);
        return BBTOB(nbblks) / sizeof(struct xfs_dqblk);
}
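
/*
 * Illustrative arithmetic only (not part of the original source): one
 * 4096-byte filesystem block spans 8 basic blocks of 512 bytes each, so,
 * assuming the on-disk struct xfs_dqblk is 136 bytes, the call
 *
 *	xfs_calc_dquots_per_chunk(8)
 *
 * returns BBTOB(8) / 136 = 4096 / 136 = 30 dquot records per chunk.
 */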

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * The xfs_dqblk structure /contains/ the xfs_disk_dquot structure;
 * we verify them separately because at some points we have only the
 * smaller xfs_disk_dquot structure available.
 */

xfs_failaddr_t
xfs_dquot_verify(
        struct xfs_mount        *mp,
        struct xfs_disk_dquot   *ddq,
        xfs_dqid_t              id)     /* used only during quotacheck */
{
        __u8                    ddq_type;

        /*
         * We can encounter an uninitialized dquot buffer for 2 reasons:
         * 1. If we crash while deleting the quotainode(s), and those blks got
         *    used for user data. This is because we take the path of regular
         *    file deletion; however, the size field of quotainodes is never
         *    updated, so all the tricks that we play in itruncate_finish
         *    don't quite matter.
         *
         * 2. We don't replay the quota buffers when there's a quotaoff logitem.
         *    But the allocation will be replayed so we'll end up with an
         *    uninitialized quota block.
         *
         * This is all fine; things are still consistent, and we haven't lost
         * any quota information. Just don't complain about bad dquot blks.
         */
        if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
                return __this_address;
        if (ddq->d_version != XFS_DQUOT_VERSION)
                return __this_address;

        if (ddq->d_type & ~XFS_DQTYPE_ANY)
                return __this_address;
        ddq_type = ddq->d_type & XFS_DQTYPE_REC_MASK;
        if (ddq_type != XFS_DQTYPE_USER &&
            ddq_type != XFS_DQTYPE_PROJ &&
            ddq_type != XFS_DQTYPE_GROUP)
                return __this_address;

        if ((ddq->d_type & XFS_DQTYPE_BIGTIME) &&
            !xfs_has_bigtime(mp))
                return __this_address;

        if ((ddq->d_type & XFS_DQTYPE_BIGTIME) && !ddq->d_id)
                return __this_address;

        if (id != -1 && id != be32_to_cpu(ddq->d_id))
                return __this_address;

        if (!ddq->d_id)
                return NULL;

        if (ddq->d_blk_softlimit &&
            be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
            !ddq->d_btimer)
                return __this_address;

        if (ddq->d_ino_softlimit &&
            be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
            !ddq->d_itimer)
                return __this_address;

        if (ddq->d_rtb_softlimit &&
            be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
            !ddq->d_rtbtimer)
                return __this_address;

        return NULL;
}
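
/*
 * Usage sketch (illustrative, not part of the original source): callers
 * generally treat a non-NULL return as corruption and report the failure
 * address, along the lines of
 *
 *	fa = xfs_dquot_verify(mp, ddq, id);
 *	if (fa) {
 *		xfs_alert(mp, "corrupt dquot detected at %pS", fa);
 *		return -EFSCORRUPTED;
 *	}
 *
 * The exact message and error handling vary by caller.
 */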

xfs_failaddr_t
xfs_dqblk_verify(
        struct xfs_mount        *mp,
        struct xfs_dqblk        *dqb,
        xfs_dqid_t              id)     /* used only during quotacheck */
{
        if (xfs_has_crc(mp) &&
            !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
                return __this_address;

        return xfs_dquot_verify(mp, &dqb->dd_diskdq, id);
}

/*
 * Reset an ondisk dquot block to a freshly initialized state.
 */
void
xfs_dqblk_repair(
        struct xfs_mount        *mp,
        struct xfs_dqblk        *dqb,
        xfs_dqid_t              id,
        xfs_dqtype_t            type)
{
        /*
         * Typically, a repair is only requested by quotacheck.
         */
        ASSERT(id != -1);
        memset(dqb, 0, sizeof(struct xfs_dqblk));

        dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
        dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION;
        dqb->dd_diskdq.d_type = type;
        dqb->dd_diskdq.d_id = cpu_to_be32(id);

        if (xfs_has_crc(mp)) {
                uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
                xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
                                 XFS_DQUOT_CRC_OFF);
        }
}
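
/*
 * Usage note (illustrative, not part of the original source): quotacheck is
 * the typical caller of xfs_dqblk_repair(). When it is resetting the counters
 * in a dquot buffer and xfs_dqblk_verify() flags a record, it rewrites that
 * record in place with the expected id and type, roughly
 *
 *	if (xfs_dqblk_verify(mp, &dqb[j], id + j) != NULL)
 *		xfs_dqblk_repair(mp, &dqb[j], id + j, type);
 */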

STATIC bool
xfs_dquot_buf_verify_crc(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        bool                    readahead)
{
        struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
        int                     ndquots;
        int                     i;

        if (!xfs_has_crc(mp))
                return true;

        /*
         * if we are in log recovery, the quota subsystem has not been
         * initialised so we have no quotainfo structure. In that case, we need
         * to manually calculate the number of dquots in the buffer.
         */
        if (mp->m_quotainfo)
                ndquots = mp->m_quotainfo->qi_dqperchunk;
        else
                ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

        for (i = 0; i < ndquots; i++, d++) {
                if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
                                      XFS_DQUOT_CRC_OFF)) {
                        if (!readahead)
                                xfs_buf_verifier_error(bp, -EFSBADCRC, __func__,
                                        d, sizeof(*d), __this_address);
                        return false;
                }
        }
        return true;
}

STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp,
        bool                    readahead)
{
        struct xfs_dqblk        *dqb = bp->b_addr;
        xfs_failaddr_t          fa;
        xfs_dqid_t              id = 0;
        int                     ndquots;
        int                     i;

        /*
         * if we are in log recovery, the quota subsystem has not been
         * initialised so we have no quotainfo structure. In that case, we need
         * to manually calculate the number of dquots in the buffer.
         */
        if (mp->m_quotainfo)
                ndquots = mp->m_quotainfo->qi_dqperchunk;
        else
                ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

        /*
         * On the first read of the buffer, verify that each dquot is valid.
         * We don't know what the id of the dquot is supposed to be, just that
         * they should be increasing monotonically within the buffer. If the
         * first id is corrupt, then it will fail on the second dquot in the
         * buffer so corruptions could point to the wrong dquot in this case.
         */
        for (i = 0; i < ndquots; i++) {
                struct xfs_disk_dquot   *ddq;

                ddq = &dqb[i].dd_diskdq;

                if (i == 0)
                        id = be32_to_cpu(ddq->d_id);

                fa = xfs_dqblk_verify(mp, &dqb[i], id + i);
                if (fa) {
                        if (!readahead)
                                xfs_buf_verifier_error(bp, -EFSCORRUPTED,
                                                __func__, &dqb[i],
                                                sizeof(struct xfs_dqblk), fa);
                        return fa;
                }
        }

        return NULL;
}
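
/*
 * Illustrative example (not part of the original source): with 4096-byte
 * filesystem blocks and 136-byte dquot records, a chunk holds 30 dquots.
 * A buffer whose first record carries d_id == 60 is therefore expected to
 * contain ids 60..89, and the "id + i" check above flags any record that
 * breaks that monotonically increasing sequence.
 */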

static xfs_failaddr_t
xfs_dquot_buf_verify_struct(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;

        return xfs_dquot_buf_verify(mp, bp, false);
}

static void
xfs_dquot_buf_read_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;

        if (!xfs_dquot_buf_verify_crc(mp, bp, false))
                return;
        xfs_dquot_buf_verify(mp, bp, false);
}

/*
 * readahead errors are silent and simply leave the buffer as !done so a real
 * read will then be run with the xfs_dquot_buf_ops verifier. See
 * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
 * reporting the failure.
 */
static void
xfs_dquot_buf_readahead_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;

        if (!xfs_dquot_buf_verify_crc(mp, bp, true) ||
            xfs_dquot_buf_verify(mp, bp, true) != NULL) {
                xfs_buf_ioerror(bp, -EIO);
                bp->b_flags &= ~XBF_DONE;
        }
}

/*
 * we don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;

        xfs_dquot_buf_verify(mp, bp, false);
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
        .name           = "xfs_dquot",
        .magic16        = { cpu_to_be16(XFS_DQUOT_MAGIC),
                            cpu_to_be16(XFS_DQUOT_MAGIC) },
        .verify_read    = xfs_dquot_buf_read_verify,
        .verify_write   = xfs_dquot_buf_write_verify,
        .verify_struct  = xfs_dquot_buf_verify_struct,
};

const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
        .name           = "xfs_dquot_ra",
        .magic16        = { cpu_to_be16(XFS_DQUOT_MAGIC),
                            cpu_to_be16(XFS_DQUOT_MAGIC) },
        .verify_read    = xfs_dquot_buf_readahead_verify,
        .verify_write   = xfs_dquot_buf_write_verify,
};
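
/*
 * Usage sketch (illustrative, not part of the original source): readers of
 * dquot buffers attach these ops so the verifiers run at I/O completion,
 * e.g. something along the lines of
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno,
 *				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 *				   &xfs_dquot_buf_ops);
 *
 * while readahead paths pass &xfs_dquot_buf_ra_ops so that verification
 * failures stay silent until a real read retries with the full verifier.
 */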

/* Convert an on-disk timer value into an incore timer value. */
time64_t
xfs_dquot_from_disk_ts(
        struct xfs_disk_dquot   *ddq,
        __be32                  dtimer)
{
        uint32_t                t = be32_to_cpu(dtimer);

        if (t != 0 && (ddq->d_type & XFS_DQTYPE_BIGTIME))
                return xfs_dq_bigtime_to_unix(t);

        return t;
}

/* Convert an incore timer value into an on-disk timer value. */
__be32
xfs_dquot_to_disk_ts(
        struct xfs_dquot        *dqp,
        time64_t                timer)
{
        uint32_t                t = timer;

        if (timer != 0 && (dqp->q_type & XFS_DQTYPE_BIGTIME))
                t = xfs_dq_unix_to_bigtime(timer);

        return cpu_to_be32(t);
}
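
/*
 * Illustrative example (not part of the original source): bigtime quota
 * expiration timers trade resolution for range. Assuming the usual encoding
 * is a shift by XFS_DQ_BIGTIME_SHIFT (i.e. 4-second units), a Unix expiry of
 * 1700000000 is stored on disk as 1700000000 >> 2 = 425000000, and reading
 * it back via xfs_dquot_from_disk_ts() yields 425000000 << 2 = 1700000000.
 */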

inline unsigned int
xfs_dqinode_sick_mask(xfs_dqtype_t type)
{
        switch (type) {
        case XFS_DQTYPE_USER:
                return XFS_SICK_FS_UQUOTA;
        case XFS_DQTYPE_GROUP:
                return XFS_SICK_FS_GQUOTA;
        case XFS_DQTYPE_PROJ:
                return XFS_SICK_FS_PQUOTA;
        }

        ASSERT(0);
        return 0;
}

/*
 * Load the inode for a given type of quota, assuming that the sb fields have
 * been sorted out. This is not true when switching quota types on a V4
 * filesystem, so do not use this function for that. If metadir is enabled,
 * @dp must be the /quota metadir.
 *
 * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on
 * success; or a negative errno.
 */
int
xfs_dqinode_load(
        struct xfs_trans        *tp,
        struct xfs_inode        *dp,
        xfs_dqtype_t            type,
        struct xfs_inode        **ipp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        struct xfs_inode        *ip;
        enum xfs_metafile_type  metafile_type = xfs_dqinode_metafile_type(type);
        int                     error;

        if (!xfs_has_metadir(mp)) {
                xfs_ino_t       ino;

                switch (type) {
                case XFS_DQTYPE_USER:
                        ino = mp->m_sb.sb_uquotino;
                        break;
                case XFS_DQTYPE_GROUP:
                        ino = mp->m_sb.sb_gquotino;
                        break;
                case XFS_DQTYPE_PROJ:
                        ino = mp->m_sb.sb_pquotino;
                        break;
                default:
                        ASSERT(0);
                        return -EFSCORRUPTED;
                }

                /* Superblock loading should have set a zero quota inode to NULLFSINO. */
                if (ino == NULLFSINO)
                        return -ENOENT;

                error = xfs_trans_metafile_iget(tp, ino, metafile_type, &ip);
        } else {
                error = xfs_metadir_load(tp, dp, xfs_dqinode_path(type),
                                metafile_type, &ip);
                if (error == -ENOENT)
                        return error;
        }
        if (error) {
                if (xfs_metadata_is_sick(error))
                        xfs_fs_mark_sick(mp, xfs_dqinode_sick_mask(type));
                return error;
        }

        if (XFS_IS_CORRUPT(mp, ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
                               ip->i_df.if_format != XFS_DINODE_FMT_BTREE)) {
                xfs_irele(ip);
                xfs_fs_mark_sick(mp, xfs_dqinode_sick_mask(type));
                return -EFSCORRUPTED;
        }

        if (XFS_IS_CORRUPT(mp, ip->i_projid != 0)) {
                xfs_irele(ip);
                xfs_fs_mark_sick(mp, xfs_dqinode_sick_mask(type));
                return -EFSCORRUPTED;
        }

        *ipp = ip;
        return 0;
}
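
/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * that only wants an already-existing quota inode typically distinguishes
 * "no quota inode yet" from real failures, e.g.
 *
 *	error = xfs_dqinode_load(tp, dp, XFS_DQTYPE_USER, &ip);
 *	if (error == -ENOENT)
 *		return 0;	// nothing to do; quota inode never created
 *	if (error)
 *		return error;
 *	// ...use ip, then drop the reference with xfs_irele(ip)...
 */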

/* Create a metadata directory quota inode. */
int
xfs_dqinode_metadir_create(
        struct xfs_inode        *dp,
        xfs_dqtype_t            type,
        struct xfs_inode        **ipp)
{
        struct xfs_metadir_update       upd = {
                .dp             = dp,
                .metafile_type  = xfs_dqinode_metafile_type(type),
                .path           = xfs_dqinode_path(type),
        };
        int                     error;

        error = xfs_metadir_start_create(&upd);
        if (error)
                return error;

        error = xfs_metadir_create(&upd, S_IFREG);
        if (error)
                return error;

        xfs_trans_log_inode(upd.tp, upd.ip, XFS_ILOG_CORE);

        error = xfs_metadir_commit(&upd);
        if (error)
                return error;

        xfs_finish_inode_setup(upd.ip);
        *ipp = upd.ip;
        return 0;
}

#ifndef __KERNEL__
/* Link a metadata directory quota inode. */
int
xfs_dqinode_metadir_link(
        struct xfs_inode        *dp,
        xfs_dqtype_t            type,
        struct xfs_inode        *ip)
{
        struct xfs_metadir_update       upd = {
                .dp             = dp,
                .metafile_type  = xfs_dqinode_metafile_type(type),
                .path           = xfs_dqinode_path(type),
                .ip             = ip,
        };
        int                     error;

        error = xfs_metadir_start_link(&upd);
        if (error)
                return error;

        error = xfs_metadir_link(&upd);
        if (error)
                return error;

        xfs_trans_log_inode(upd.tp, upd.ip, XFS_ILOG_CORE);

        return xfs_metadir_commit(&upd);
}
#endif /* __KERNEL__ */

/* Create the parent directory for all quota inodes and load it. */
int
xfs_dqinode_mkdir_parent(
        struct xfs_mount        *mp,
        struct xfs_inode        **dpp)
{
        if (!mp->m_metadirip) {
                xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
                return -EFSCORRUPTED;
        }

        return xfs_metadir_mkdir(mp->m_metadirip, "quota", dpp);
}

/*
 * Load the parent directory of all quota inodes. Pass the inode to the caller
 * because quota functions (e.g. QUOTARM) can be called on the quota files even
 * if quotas are not enabled.
 */
int
xfs_dqinode_load_parent(
        struct xfs_trans        *tp,
        struct xfs_inode        **dpp)
{
        struct xfs_mount        *mp = tp->t_mountp;

        if (!mp->m_metadirip) {
                xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
                return -EFSCORRUPTED;
        }

        return xfs_metadir_load(tp, mp->m_metadirip, "quota", XFS_METAFILE_DIR,
                        dpp);
}
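
/*
 * Usage sketch (illustrative, not part of the original source): on a metadir
 * filesystem the two helpers above are typically chained; the /quota parent
 * is loaded first and then passed as @dp when loading an individual quota
 * inode, e.g.
 *
 *	error = xfs_dqinode_load_parent(tp, &dp);
 *	if (error)
 *		return error;
 *	error = xfs_dqinode_load(tp, dp, XFS_DQTYPE_PROJ, &ip);
 *	xfs_irele(dp);
 *	if (error)
 *		return error;
 */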