// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "xfs_ag.h"
/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xchk_get_inode(sc);
	if (error)
		goto out;

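	/*
	 * Take both the IOLOCK and the MMAPLOCK so that no new file IO or
	 * page faults can start while we inspect this inode's mappings.
	 */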
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		struct address_space	*mapping = VFS_I(sc->ip)->i_mapping;

		inode_dio_wait(VFS_I(sc->ip));

		/*
		 * Try to flush all incore state to disk before we examine the
		 * space mappings for the data fork. Leave accumulated errors
		 * in the mapping for the writer threads to consume.
		 *
		 * On ENOSPC or EIO writeback errors, we continue into the
		 * extent mapping checks because write failures do not
		 * necessarily imply anything about the correctness of the file
		 * metadata. The metadata and the file data could be on
		 * completely separate devices; a media failure might only
		 * affect a subset of the disk, etc. We can handle delalloc
		 * extents in the scrubber, so leaving them in memory is fine.
		 */
		error = filemap_fdatawrite(mapping);
		if (!error)
			error = filemap_fdatawait_keep_errors(mapping);
		if (error && (error != -ENOSPC && error != -EIO))
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}
/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xchk_bmap_info {
	struct xfs_scrub	*sc;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastoff;
	bool			is_rt;
	bool			is_shared;
	bool			was_loaded;
	int			whichfork;
};
/* Look for a corresponding rmap for this irec. */
static inline bool
xchk_bmap_get_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno,
	uint64_t		owner,
	struct xfs_rmap_irec	*rmap)
{
	xfs_fileoff_t		offset;
	unsigned int		rflags = 0;
	int			has_rmap;
	int			error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;
	if (irec->br_state == XFS_EXT_UNWRITTEN)
		rflags |= XFS_RMAP_UNWRITTEN;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
	} else {
		error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
	}
	if (!xchk_should_check_xref(info->sc, &error, &info->sc->sa.rmap_cur))
		return false;

	if (!has_rmap)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
			irec->br_startoff);
	return has_rmap;
}

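/*
 * Decide if the incore extent immediately before @irec is contiguous with it
 * in both logical and physical space and has the same written/unwritten
 * state.
 */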
static inline bool
xchk_bmap_has_prev(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_bmbt_irec	got;
	struct xfs_ifork	*ifp;

	ifp = xfs_ifork_ptr(info->sc->ip, info->whichfork);

	if (!xfs_iext_peek_prev_extent(ifp, &info->icur, &got))
		return false;
	if (got.br_startoff + got.br_blockcount != irec->br_startoff)
		return false;
	if (got.br_startblock + got.br_blockcount != irec->br_startblock)
		return false;
	if (got.br_state != irec->br_state)
		return false;
	return true;
}

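/*
 * Decide if the incore extent immediately after @irec is contiguous with it
 * in both logical and physical space and has the same written/unwritten
 * state.
 */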
static inline bool
xchk_bmap_has_next(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_bmbt_irec	got;
	struct xfs_ifork	*ifp;

	ifp = xfs_ifork_ptr(info->sc->ip, info->whichfork);

	if (!xfs_iext_peek_next_extent(ifp, &info->icur, &got))
		return false;
	if (irec->br_startoff + irec->br_blockcount != got.br_startoff)
		return false;
	if (irec->br_startblock + irec->br_blockcount != got.br_startblock)
		return false;
	if (got.br_state != irec->br_state)
		return false;
	return true;
}
/* Make sure that we have rmapbt records for this extent. */
STATIC void
xchk_bmap_xref_rmap(
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec,
	xfs_agblock_t		agbno)
{
	struct xfs_rmap_irec	rmap;
	unsigned long long	rmap_end;
	uint64_t		owner;

	if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable. CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xchk_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap. Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
	    !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (!!(info->whichfork == XFS_ATTR_FORK) !=
	    !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * If the rmap starts before this bmbt record, make sure there's a bmbt
	 * record for the previous offset that is contiguous with this mapping.
	 * Skip this for CoW fork extents because the refcount btree (and not
	 * the inode) is the ondisk owner for those extents.
	 */
	if (info->whichfork != XFS_COW_FORK && rmap.rm_startblock < agbno &&
	    !xchk_bmap_has_prev(info, irec)) {
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
		return;
	}

	/*
	 * If the rmap ends after this bmbt record, make sure there's a bmbt
	 * record for the next offset that is contiguous with this mapping.
	 * Skip this for CoW fork extents because the refcount btree (and not
	 * the inode) is the ondisk owner for those extents.
	 */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (info->whichfork != XFS_COW_FORK &&
	    rmap_end > agbno + irec->br_blockcount &&
	    !xchk_bmap_has_next(info, irec)) {
		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
		return;
	}
}
/* Cross-reference a single rtdev extent record. */
STATIC void
xchk_bmap_rt_iextent_xref(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}
/* Cross-reference a single datadev extent record. */
STATIC void
xchk_bmap_iextent_xref(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		len;
	int			error;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xchk_ag_init_existing(info->sc, agno, &info->sc->sa);
	if (!xchk_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		goto out_free;

	xchk_xref_is_used_space(info->sc, agbno, len);
	xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
	xchk_bmap_xref_rmap(info, irec, agbno);
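	/*
	 * Only the data fork of a reflink inode is allowed to share blocks,
	 * so every other fork must not be marked shared in the refcount
	 * btree. CoW fork extents must be flagged as CoW staging and must
	 * not be shared either.
	 */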
	switch (info->whichfork) {
	case XFS_DATA_FORK:
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		fallthrough;
	case XFS_ATTR_FORK:
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xchk_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		xchk_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

out_free:
	xchk_ag_free(info->sc, &info->sc->sa);
}
/*
 * Directories and attr forks should never have blocks that can't be addressed
 * by a xfs_dablk_t.
 */
STATIC void
xchk_bmap_dirattr_extent(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		off;

	if (!S_ISDIR(VFS_I(ip)->i_mode) && info->whichfork != XFS_ATTR_FORK)
		return;

	if (!xfs_verify_dablk(mp, irec->br_startoff))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	off = irec->br_startoff + irec->br_blockcount - 1;
	if (!xfs_verify_dablk(mp, off))
		xchk_fblock_set_corrupt(info->sc, info->whichfork, off);
}
/* Scrub a single extent record. */
STATIC void
xchk_bmap_iextent(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;

	/*
	 * Check for out-of-order extents. This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	xchk_bmap_dirattr_extent(ip, info, irec);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > XFS_MAX_BMBT_EXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (info->is_rt &&
	    !xfs_verify_rtext(mp, irec->br_startblock, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    !xfs_verify_fsbext(mp, irec->br_startblock, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

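	/*
	 * The per-extent checks are done; now cross-reference this mapping
	 * against the other metadata for the device backing it.
	 */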
	if (info->is_rt)
		xchk_bmap_rt_iextent_xref(ip, info, irec);
	else
		xchk_bmap_iextent_xref(ip, info, irec);
}
/* Scrub a bmbt record. */
STATIC int
xchk_bmapbt_rec(
	struct xchk_btree	*bs,
	const union xfs_btree_rec *rec)
{
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	iext_irec;
	struct xfs_iext_cursor	icur;
	struct xchk_bmap_info	*info = bs->private;
	struct xfs_inode	*ip = bs->cur->bc_ino.ip;
	struct xfs_buf		*bp = NULL;
	struct xfs_btree_block	*block;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, info->whichfork);
	uint64_t		owner;
	int			i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
	if (xfs_has_crc(bs->cur->bc_mp) &&
	    bs->cur->bc_levels[0].ptr == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xchk_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/*
	 * Check that the incore extent tree contains an extent that matches
	 * this one exactly. We validate those cached bmaps later, so we don't
	 * need to check them here. If the incore extent tree was just loaded
	 * from disk by the scrubber, we assume that its contents match what's
	 * on disk (we still hold the ILOCK) and skip the equivalence check.
	 */
	if (!info->was_loaded)
		return 0;

	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	if (!xfs_iext_lookup_extent(ip, ifp, irec.br_startoff, &icur,
				&iext_irec) ||
	    irec.br_startoff != iext_irec.br_startoff ||
	    irec.br_startblock != iext_irec.br_startblock ||
	    irec.br_blockcount != iext_irec.br_blockcount ||
	    irec.br_state != iext_irec.br_state)
		xchk_fblock_set_corrupt(bs->sc, info->whichfork,
				irec.br_startoff);
	return 0;
}
/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
	struct xfs_scrub	*sc,
	int			whichfork,
	struct xchk_bmap_info	*info)
{
	struct xfs_owner_info	oinfo;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, whichfork);
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_btree_cur	*cur;
	int			error;

	/* Load the incore bmap cache if it's not loaded. */
	info->was_loaded = !xfs_need_iread_extents(ifp);

	error = xfs_iread_extents(sc->tp, ip, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Check the btree structure. */
	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error);
out:
	return error;
}

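/*
 * Caller context passed to xchk_bmap_check_rmap() while walking an AG's
 * reverse mapping records.
 */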
struct xchk_bmap_check_rmap_info {
	struct xfs_scrub	*sc;
	int			whichfork;
	struct xfs_iext_cursor	icur;
};
/* Can we find bmaps that fit this rmap? */
STATIC int
xchk_bmap_check_rmap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_rmap_irec		check_rec;
	struct xchk_bmap_check_rmap_info	*sbcri = priv;
	struct xfs_ifork		*ifp;
	struct xfs_scrub		*sc = sbcri->sc;
	bool				have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = xfs_ifork_ptr(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent record lengths are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	check_rec = *rec;
	while (have_map) {
		if (irec.br_startoff != check_rec.rm_offset)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					check_rec.rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_ag.pag->pag_agno,
				check_rec.rm_startblock))
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					check_rec.rm_offset);
		if (irec.br_blockcount > check_rec.rm_blockcount)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					check_rec.rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
		check_rec.rm_startblock += irec.br_blockcount;
		check_rec.rm_offset += irec.br_blockcount;
		check_rec.rm_blockcount -= irec.br_blockcount;
		if (check_rec.rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xchk_fblock_set_corrupt(sc, sbcri->whichfork,
					check_rec.rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;
	return 0;
}
/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
	struct xfs_scrub	*sc,
	int			whichfork,
	struct xfs_perag	*pag)
{
	struct xchk_bmap_check_rmap_info	sbcri;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agf;
	int			error;

	error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, pag);

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
	if (error == -ECANCELED)
		error = 0;

	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(sc->tp, agf);
	return error;
}
/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, whichfork);
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	bool			zero_size;
	int			error;

	if (!xfs_has_rmapbt(sc->mp) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	ASSERT(xfs_ifork_ptr(sc->ip, whichfork) != NULL);

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */
	if (whichfork == XFS_DATA_FORK)
		zero_size = i_size_read(VFS_I(sc->ip)) == 0;
	else
		zero_size = false;

	if (ifp->if_format != XFS_DINODE_FMT_BTREE &&
	    (zero_size || ifp->if_nextents > 0))
		return 0;

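	/*
	 * Walk every AG's reverse mappings to make sure that each rmap for
	 * this file fork is matched by incore extent data.
	 */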
	for_each_perag(sc->mp, agno, pag) {
		error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag);
		if (error ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {
			xfs_perag_put(pag);
			return error;
		}
	}

	return 0;
}
/* Scrub a delalloc reservation from the incore extent map tree. */
STATIC void
xchk_bmap_iextent_delalloc(
	struct xfs_inode	*ip,
	struct xchk_bmap_info	*info,
	struct xfs_bmbt_irec	*irec)
{
	struct xfs_mount	*mp = info->sc->mp;

	/*
	 * Check for out-of-order extents. This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > XFS_MAX_BMBT_EXTLEN)
		xchk_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}
/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xchk_bmap(
	struct xfs_scrub	*sc,
	int			whichfork)
{
	struct xfs_bmbt_irec	irec;
	struct xchk_bmap_info	info = { NULL };
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = sc->ip;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		endoff;
	int			error = 0;

	/* Non-existent forks can be ignored. */
	if (!ifp)
		goto out;

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
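		/*
		 * An attr fork should only be present when at least one of
		 * the attr feature bits is enabled on this filesystem.
		 */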
		if (!xfs_has_attr(mp) && !xfs_has_attr2(mp))
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (ifp->if_format) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		if (whichfork == XFS_COW_FORK)
			xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xchk_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xchk_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xchk_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = xfs_ifork_ptr(ip, whichfork);
	for_each_xfs_iext(ifp, &info.icur, &irec) {
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			goto out;

		if (irec.br_startoff >= endoff) {
			xchk_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}

		if (isnullstartblock(irec.br_startblock))
			xchk_bmap_iextent_delalloc(ip, &info, &irec);
		else
			xchk_bmap_iextent(ip, &info, &irec);
		info.lastoff = irec.br_startoff + irec.br_blockcount;
	}

	error = xchk_bmap_check_rmaps(sc, whichfork);
	if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}
/* Scrub an inode's data fork. */
int
xchk_bmap_data(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
	struct xfs_scrub	*sc)
{
	return xchk_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
	struct xfs_scrub	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xchk_bmap(sc, XFS_COW_FORK);
}