/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_buf *bp)
{
struct xfs_owner_info oinfo;
*/
int
xchk_superblock(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_buf *bp;
/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
xfs_extlen_t blocks = 0;
/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
xfs_agblock_t agbno;
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
struct xfs_mount *mp = sc->mp;
/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
xfs_agblock_t blocks;
/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_owner_info oinfo;
struct xfs_mount *mp = sc->mp;
/* Scrub the AGF. */
int
xchk_agf(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_agf *agf;
unsigned int sz_entries;
unsigned int nr_entries;
xfs_agblock_t *entries;
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
};
/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
struct xfs_owner_info *oinfo)
{
void *priv)
{
struct xchk_agfl_info *sai = priv;
- struct xfs_scrub_context *sc = sai->sc;
+ struct xfs_scrub *sc = sai->sc;
xfs_agnumber_t agno = sc->sa.agno;
if (xfs_verify_agbno(mp, agno, agbno) &&
/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_owner_info oinfo;
struct xfs_mount *mp = sc->mp;
/* Scrub the AGFL. */
int
xchk_agfl(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xchk_agfl_info sai;
struct xfs_agf *agf;
/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
xfs_agino_t icount;
/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_owner_info oinfo;
struct xfs_mount *mp = sc->mp;
/* Scrub the AGI. */
int
xchk_agi(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_agi *agi;
/* Repair the superblock. */
int
xrep_superblock(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_buf *bp;
*/
int
xchk_setup_ag_allocbt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
return xchk_setup_ag_btree(sc, ip, false);
*/
STATIC void
xchk_allocbt_xref_other(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len)
{
/* Cross-reference with the other btrees. */
STATIC void
xchk_allocbt_xref(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len)
{
/* Scrub the freespace btrees for some AG. */
STATIC int
xchk_allocbt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_btnum_t which)
{
struct xfs_owner_info oinfo;
int
xchk_bnobt(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
return xchk_allocbt(sc, XFS_BTNUM_BNO);
}
int
xchk_cntbt(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
return xchk_allocbt(sc, XFS_BTNUM_CNT);
}
/* xref check that the extent is not free */
void
xchk_xref_is_used_space(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len)
{
/* Set us up to scrub an inode's extended attributes. */
int
xchk_setup_xattr(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
size_t sz;
struct xchk_xattr {
struct xfs_attr_list_context context;
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
};
/*
*/
STATIC bool
xchk_xattr_set_map(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
unsigned long *map,
unsigned int start,
unsigned int len)
*/
STATIC bool
xchk_xattr_check_freemap(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
unsigned long *map,
struct xfs_attr3_icleaf_hdr *leafhdr)
{
/* Scrub the extended attribute metadata. */
int
xchk_xattr(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xchk_xattr sx;
struct attrlist_cursor_kern cursor = { 0 };
/* Set us up with an inode's bmap. */
int
xchk_setup_inode_bmap(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
int error;
*/
struct xchk_bmap_info {
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
xfs_fileoff_t lastoff;
bool is_rt;
bool is_shared;
/* Scan the btree records. */
STATIC int
xchk_bmap_btree(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork,
struct xchk_bmap_info *info)
{
}
struct xchk_bmap_check_rmap_info {
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
int whichfork;
struct xfs_iext_cursor icur;
};
struct xfs_bmbt_irec irec;
struct xchk_bmap_check_rmap_info *sbcri = priv;
struct xfs_ifork *ifp;
- struct xfs_scrub_context *sc = sbcri->sc;
+ struct xfs_scrub *sc = sbcri->sc;
bool have_map;
/* Is this even the right fork? */
/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_ag_rmaps(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork,
xfs_agnumber_t agno)
{
/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xchk_bmap_check_rmaps(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork)
{
loff_t size;
*/
STATIC int
xchk_bmap(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork)
{
struct xfs_bmbt_irec irec;
/* Scrub an inode's data fork. */
int
xchk_bmap_data(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
return xchk_bmap(sc, XFS_DATA_FORK);
}
/* Scrub an inode's attr fork. */
int
xchk_bmap_attr(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
return xchk_bmap(sc, XFS_ATTR_FORK);
}
/* Scrub an inode's CoW fork. */
int
xchk_bmap_cow(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
if (!xfs_is_reflink_inode(sc->ip))
return -ENOENT;
*/
static bool
__xchk_btree_process_error(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
int level,
int *error,
bool
xchk_btree_process_error(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
int level,
int *error)
bool
xchk_btree_xref_process_error(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
int level,
int *error)
/* Record btree block corruption. */
static void
__xchk_btree_set_corrupt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
int level,
__u32 errflag,
void
xchk_btree_set_corrupt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
int level)
{
void
xchk_btree_xref_set_corrupt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
int level)
{
*/
int
xchk_btree(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
xchk_btree_rec_fn scrub_fn,
struct xfs_owner_info *oinfo,
/* btree scrub */
/* Check for btree operation errors. */
-bool xchk_btree_process_error(struct xfs_scrub_context *sc,
+bool xchk_btree_process_error(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level, int *error);
/* Check for btree xref operation errors. */
-bool xchk_btree_xref_process_error(struct xfs_scrub_context *sc,
+bool xchk_btree_xref_process_error(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level,
int *error);
/* Check for btree corruption. */
-void xchk_btree_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_btree_set_corrupt(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level);
/* Check for btree xref discrepancies. */
-void xchk_btree_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_btree_xref_set_corrupt(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level);
struct xchk_btree;
struct xchk_btree {
/* caller-provided scrub state */
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
struct xfs_btree_cur *cur;
xchk_btree_rec_fn scrub_rec;
struct xfs_owner_info *oinfo;
bool firstkey[XFS_BTREE_MAXLEVELS];
struct list_head to_check;
};
-int xchk_btree(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
xchk_btree_rec_fn scrub_fn,
struct xfs_owner_info *oinfo, void *private);
/* Check for operational errors. */
static bool
__xchk_process_error(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agnumber_t agno,
xfs_agblock_t bno,
int *error,
bool
xchk_process_error(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agnumber_t agno,
xfs_agblock_t bno,
int *error)
bool
xchk_xref_process_error(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agnumber_t agno,
xfs_agblock_t bno,
int *error)
/* Check for operational errors for a file offset. */
static bool
__xchk_fblock_process_error(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork,
xfs_fileoff_t offset,
int *error,
bool
xchk_fblock_process_error(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork,
xfs_fileoff_t offset,
int *error)
bool
xchk_fblock_xref_process_error(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork,
xfs_fileoff_t offset,
int *error)
/* Record a block which could be optimized. */
void
xchk_block_set_preen(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_buf *bp)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
*/
void
xchk_ino_set_preen(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
/* Record a corrupt block. */
void
xchk_block_set_corrupt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_buf *bp)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_buf *bp)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
*/
void
xchk_ino_set_corrupt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork,
xfs_fileoff_t offset)
{
/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork,
xfs_fileoff_t offset)
{
*/
void
xchk_ino_set_warning(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork,
xfs_fileoff_t offset)
{
/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
trace_xchk_incomplete(sc, __return_address);
*/
int
xchk_count_rmap_ownedby_ag(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
struct xfs_owner_info *oinfo,
xfs_filblks_t *blocks)
/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
unsigned int type)
{
/* Return all AG header read failures when scanning btrees. */
*/
int
xchk_ag_read_headers(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agnumber_t agno,
struct xfs_buf **agi,
struct xfs_buf **agf,
/* Initialize all the btree cursors for an AG. */
int
xchk_ag_btcur_init(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xchk_ag *sa)
{
struct xfs_mount *mp = sc->mp;
/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xchk_ag *sa)
{
xchk_ag_btcur_free(sa);
*/
int
xchk_ag_init(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agnumber_t agno,
struct xchk_ag *sa)
{
*/
int
xchk_trans_alloc(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
uint resblks)
{
if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
uint resblks;
/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip,
bool force_log)
{
*/
int
xchk_get_inode(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip_in)
{
struct xfs_imap imap;
/* Set us up to scrub a file's contents. */
int
xchk_setup_inode_contents(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip,
unsigned int resblks)
{
*/
bool
xchk_should_check_xref(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int *error,
struct xfs_btree_cur **curpp)
{
/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_buf *bp)
{
xfs_failaddr_t fa;
*/
int
xchk_metadata_inode_forks(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
__u32 smtype;
bool shared;
*/
static inline bool
xchk_should_terminate(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int *error)
{
if (fatal_signal_pending(current)) {
return false;
}
-int xchk_trans_alloc(struct xfs_scrub_context *sc, uint resblks);
-bool xchk_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
+bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
xfs_agblock_t bno, int *error);
-bool xchk_fblock_process_error(struct xfs_scrub_context *sc, int whichfork,
+bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset, int *error);
-bool xchk_xref_process_error(struct xfs_scrub_context *sc,
+bool xchk_xref_process_error(struct xfs_scrub *sc,
xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
-bool xchk_fblock_xref_process_error(struct xfs_scrub_context *sc,
+bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
int whichfork, xfs_fileoff_t offset, int *error);
-void xchk_block_set_preen(struct xfs_scrub_context *sc,
+void xchk_block_set_preen(struct xfs_scrub *sc,
struct xfs_buf *bp);
-void xchk_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino);
+void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);
-void xchk_block_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_block_set_corrupt(struct xfs_scrub *sc,
struct xfs_buf *bp);
-void xchk_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino);
-void xchk_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork,
+void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
+void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset);
-void xchk_block_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
struct xfs_buf *bp);
-void xchk_ino_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
xfs_ino_t ino);
-void xchk_fblock_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
int whichfork, xfs_fileoff_t offset);
-void xchk_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino);
-void xchk_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
+void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
+void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset);
-void xchk_set_incomplete(struct xfs_scrub_context *sc);
+void xchk_set_incomplete(struct xfs_scrub *sc);
int xchk_checkpoint_log(struct xfs_mount *mp);
/* Are we set up for a cross-referencing check? */
-bool xchk_should_check_xref(struct xfs_scrub_context *sc, int *error,
+bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
struct xfs_btree_cur **curpp);
/* Setup functions */
-int xchk_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip);
-int xchk_setup_ag_allocbt(struct xfs_scrub_context *sc,
+int xchk_setup_fs(struct xfs_scrub *sc, struct xfs_inode *ip);
+int xchk_setup_ag_allocbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_ag_iallocbt(struct xfs_scrub_context *sc,
+int xchk_setup_ag_iallocbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_ag_rmapbt(struct xfs_scrub_context *sc,
+int xchk_setup_ag_rmapbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_ag_refcountbt(struct xfs_scrub_context *sc,
+int xchk_setup_ag_refcountbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_inode(struct xfs_scrub_context *sc,
+int xchk_setup_inode(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_inode_bmap(struct xfs_scrub_context *sc,
+int xchk_setup_inode_bmap(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_inode_bmap_data(struct xfs_scrub_context *sc,
+int xchk_setup_inode_bmap_data(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_directory(struct xfs_scrub_context *sc,
+int xchk_setup_directory(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_xattr(struct xfs_scrub_context *sc,
+int xchk_setup_xattr(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_symlink(struct xfs_scrub_context *sc,
+int xchk_setup_symlink(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xchk_setup_parent(struct xfs_scrub_context *sc,
+int xchk_setup_parent(struct xfs_scrub *sc,
struct xfs_inode *ip);
#ifdef CONFIG_XFS_RT
-int xchk_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip);
+int xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip);
#else
static inline int
-xchk_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip)
+xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip)
{
return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
-int xchk_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip);
+int xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip);
#else
static inline int
-xchk_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip)
+xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip)
{
return -ENOENT;
}
#endif
-void xchk_ag_free(struct xfs_scrub_context *sc, struct xchk_ag *sa);
-int xchk_ag_init(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
+int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
struct xchk_ag *sa);
void xchk_perag_get(struct xfs_mount *mp, struct xchk_ag *sa);
-int xchk_ag_read_headers(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
struct xfs_buf **agi, struct xfs_buf **agf,
struct xfs_buf **agfl);
void xchk_ag_btcur_free(struct xchk_ag *sa);
-int xchk_ag_btcur_init(struct xfs_scrub_context *sc,
+int xchk_ag_btcur_init(struct xfs_scrub *sc,
struct xchk_ag *sa);
-int xchk_count_rmap_ownedby_ag(struct xfs_scrub_context *sc,
+int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
struct xfs_owner_info *oinfo,
xfs_filblks_t *blocks);
-int xchk_setup_ag_btree(struct xfs_scrub_context *sc,
+int xchk_setup_ag_btree(struct xfs_scrub *sc,
struct xfs_inode *ip, bool force_log);
-int xchk_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in);
-int xchk_setup_inode_contents(struct xfs_scrub_context *sc,
+int xchk_get_inode(struct xfs_scrub *sc, struct xfs_inode *ip_in);
+int xchk_setup_inode_contents(struct xfs_scrub *sc,
struct xfs_inode *ip, unsigned int resblks);
-void xchk_buffer_recheck(struct xfs_scrub_context *sc, struct xfs_buf *bp);
+void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);
/*
* Don't bother cross-referencing if we already found corruption or cross
XFS_SCRUB_OFLAG_XCORRUPT);
}
-int xchk_metadata_inode_forks(struct xfs_scrub_context *sc);
+int xchk_metadata_inode_forks(struct xfs_scrub *sc);
int xchk_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
#endif /* __XFS_SCRUB_COMMON_H__ */
int level,
int *error)
{
- struct xfs_scrub_context *sc = ds->sc;
+ struct xfs_scrub *sc = ds->sc;
if (*error == 0)
return true;
struct xchk_da_btree *ds,
int level)
{
- struct xfs_scrub_context *sc = ds->sc;
+ struct xfs_scrub *sc = ds->sc;
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
/* Visit all nodes and leaves of a da btree. */
int
xchk_da_btree(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int whichfork,
xchk_da_btree_rec_fn scrub_fn,
void *private)
xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH];
int maxrecs[XFS_DA_NODE_MAXDEPTH];
struct xfs_da_state *state;
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
void *private;
/*
int xchk_da_btree_hash(struct xchk_da_btree *ds, int level,
__be32 *hashp);
-int xchk_da_btree(struct xfs_scrub_context *sc, int whichfork,
+int xchk_da_btree(struct xfs_scrub *sc, int whichfork,
xchk_da_btree_rec_fn scrub_fn, void *private);
#endif /* __XFS_SCRUB_DABTREE_H__ */
/* Set us up to scrub directories. */
int
xchk_setup_directory(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
return xchk_setup_inode_contents(sc, ip, 0);
/* VFS fill-directory iterator */
struct dir_context dir_iter;
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
};
/* Check that an inode's mode matches a given DT_ type. */
*/
STATIC void
xchk_directory_check_free_entry(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_dablk_t lblk,
struct xfs_dir2_data_free *bf,
struct xfs_dir2_data_unused *dup)
/* Check free space info in a directory data block. */
STATIC int
xchk_directory_data_bestfree(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_dablk_t lblk,
bool is_block)
{
*/
STATIC void
xchk_directory_check_freesp(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_dablk_t lblk,
struct xfs_buf *dbp,
unsigned int len)
/* Check free space info in a directory leaf1 block. */
STATIC int
xchk_directory_leaf1_bestfree(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_da_args *args,
xfs_dablk_t lblk)
{
/* Check free space info in a directory freespace block. */
STATIC int
xchk_directory_free_bestfree(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_da_args *args,
xfs_dablk_t lblk)
{
/* Check free space information in directories. */
STATIC int
xchk_directory_blocks(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_bmbt_irec got;
struct xfs_da_args args;
/* Scrub a whole directory. */
int
xchk_directory(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xchk_dir_ctx sdc = {
.dir_iter.actor = xchk_dir_actor,
*/
int
xchk_setup_ag_iallocbt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
return xchk_setup_ag_btree(sc, ip, sc->try_harder);
*/
static inline void
xchk_iallocbt_chunk_xref_other(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino)
{
/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino,
xfs_agblock_t agbno,
*/
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int which)
{
struct xfs_owner_info oinfo;
*/
STATIC void
xchk_iallocbt_xref_rmap_inodes(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
int which,
xfs_filblks_t inode_blocks)
{
/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_btnum_t which)
{
struct xfs_btree_cur *cur;
int
xchk_inobt(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
return xchk_iallocbt(sc, XFS_BTNUM_INO);
}
int
xchk_finobt(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}
/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len,
struct xfs_btree_cur **icur,
/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len)
{
/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len)
{
*/
int
xchk_setup_inode(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
int error;
/* Validate di_extsize hint. */
STATIC void
xchk_inode_extsize(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_dinode *dip,
xfs_ino_t ino,
uint16_t mode,
*/
STATIC void
xchk_inode_cowextsize(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_dinode *dip,
xfs_ino_t ino,
uint16_t mode,
/* Make sure the di_flags make sense for the inode. */
STATIC void
xchk_inode_flags(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_dinode *dip,
xfs_ino_t ino,
uint16_t mode,
/* Make sure the di_flags2 make sense for the inode. */
STATIC void
xchk_inode_flags2(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_dinode *dip,
xfs_ino_t ino,
uint16_t mode,
/* Scrub all the ondisk inode fields. */
STATIC void
xchk_dinode(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_dinode *dip,
xfs_ino_t ino)
{
*/
static void
xchk_inode_xref_finobt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_ino_t ino)
{
struct xfs_inobt_rec_incore rec;
/* Cross reference the inode fields with the forks. */
STATIC void
xchk_inode_xref_bmap(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_dinode *dip)
{
xfs_extnum_t nextents;
/* Cross-reference with the other btrees. */
STATIC void
xchk_inode_xref(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_ino_t ino,
struct xfs_dinode *dip)
{
*/
static void
xchk_inode_check_reflink_iflag(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_ino_t ino)
{
struct xfs_mount *mp = sc->mp;
/* Scrub an inode. */
int
xchk_inode(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_dinode di;
int error = 0;
/* Set us up to scrub parents. */
int
xchk_setup_parent(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
return xchk_setup_inode_contents(sc, ip, 0);
/* Count the number of dentries in the parent dir that point to this inode. */
STATIC int
xchk_parent_count_parent_dentries(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *parent,
xfs_nlink_t *nlink)
{
*/
STATIC int
xchk_parent_validate(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_ino_t dnum,
bool *try_again)
{
/* Scrub a parent pointer. */
int
xchk_parent(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
xfs_ino_t dnum;
/* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline uint
xchk_quota_to_dqtype(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
switch (sc->sm->sm_type) {
case XFS_SCRUB_TYPE_UQUOTA:
/* Set us up to scrub a quota. */
int
xchk_setup_quota(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
uint dqtype;
/* Quotas. */
struct xchk_quota_info {
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
xfs_dqid_t last_id;
};
void *priv)
{
struct xchk_quota_info *sqi = priv;
- struct xfs_scrub_context *sc = sqi->sc;
+ struct xfs_scrub *sc = sqi->sc;
struct xfs_mount *mp = sc->mp;
struct xfs_disk_dquot *d = &dq->q_core;
struct xfs_quotainfo *qi = mp->m_quotainfo;
/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_bmbt_irec irec = { 0 };
struct xfs_iext_cursor icur;
/* Scrub all of a quota type's items. */
int
xchk_quota(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xchk_quota_info sqi;
struct xfs_mount *mp = sc->mp;
*/
int
xchk_setup_ag_refcountbt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
return xchk_setup_ag_btree(sc, ip, false);
};
struct xchk_refcnt_check {
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
struct list_head fragments;
/* refcount extent we're examining */
/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t bno,
xfs_extlen_t len,
xfs_nlink_t refcount)
/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len,
xfs_nlink_t refcount)
/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_owner_info *oinfo,
xfs_filblks_t cow_blocks)
{
/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_owner_info oinfo;
xfs_agblock_t cow_blocks = 0;
/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len)
{
*/
void
xchk_xref_is_not_shared(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len)
{
int
xrep_attempt(
struct xfs_inode *ip,
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
bool *fixed)
{
int error = 0;
*/
int
xrep_probe(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
int error = 0;
*/
int
xrep_roll_ag_trans(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
int error;
*/
xfs_extlen_t
xrep_calc_ag_resblks(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_scrub_metadata *sm = sc->sm;
/* Allocate a block in an AG. */
int
xrep_alloc_ag_block(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_owner_info *oinfo,
xfs_fsblock_t *fsbno,
enum xfs_ag_resv_type resv)
/* Initialize a new AG btree root block with zero entries. */
int
xrep_init_btblock(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_fsblock_t fsb,
struct xfs_buf **bpp,
xfs_btnum_t btnum,
/* Collect a dead btree extent for later disposal. */
int
xrep_collect_btree_extent(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xrep_extent_list *exlist,
xfs_fsblock_t fsbno,
xfs_extlen_t len)
*/
void
xrep_cancel_btree_extents(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xrep_extent_list *exlist)
{
struct xrep_extent *rex;
#define RIGHT_ALIGNED (1 << 1)
int
xrep_subtract_extents(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xrep_extent_list *exlist,
struct xrep_extent_list *sublist)
{
*/
int
xrep_invalidate_blocks(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xrep_extent_list *exlist)
{
struct xrep_extent *rex;
/* Ensure the freelist is the correct size. */
int
xrep_fix_freelist(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
bool can_shrink)
{
struct xfs_alloc_arg args = {0};
*/
STATIC int
xrep_put_freelist(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t agbno)
{
struct xfs_owner_info oinfo;
/* Dispose of a single metadata block. */
STATIC int
xrep_dispose_btree_block(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_fsblock_t fsbno,
struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type resv)
/* Dispose of btree blocks from an old per-AG btree. */
int
xrep_reap_btree_extents(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xrep_extent_list *exlist,
struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type type)
*/
struct xrep_findroot {
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
struct xfs_buf *agfl_bp;
struct xfs_agf *agf;
struct xrep_find_ag_btree *btree_info;
/* Find the roots of the per-AG btrees described in btree_info. */
int
xrep_find_ag_btree_roots(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_buf *agf_bp,
struct xrep_find_ag_btree *btree_info,
struct xfs_buf *agfl_bp)
/* Force a quotacheck the next time we mount. */
void
xrep_force_quotacheck(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
uint dqtype)
{
uint flag;
*/
int
xrep_ino_dqattach(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
int error;
#ifndef __XFS_SCRUB_REPAIR_H__
#define __XFS_SCRUB_REPAIR_H__
-static inline int xrep_notsupported(struct xfs_scrub_context *sc)
+static inline int xrep_notsupported(struct xfs_scrub *sc)
{
return -EOPNOTSUPP;
}
/* Repair helpers */
-int xrep_attempt(struct xfs_inode *ip, struct xfs_scrub_context *sc,
+int xrep_attempt(struct xfs_inode *ip, struct xfs_scrub *sc,
bool *fixed);
void xrep_failure(struct xfs_mount *mp);
-int xrep_roll_ag_trans(struct xfs_scrub_context *sc);
+int xrep_roll_ag_trans(struct xfs_scrub *sc);
bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks,
enum xfs_ag_resv_type type);
-xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub_context *sc);
-int xrep_alloc_ag_block(struct xfs_scrub_context *sc,
+xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc);
+int xrep_alloc_ag_block(struct xfs_scrub *sc,
struct xfs_owner_info *oinfo, xfs_fsblock_t *fsbno,
enum xfs_ag_resv_type resv);
-int xrep_init_btblock(struct xfs_scrub_context *sc, xfs_fsblock_t fsb,
+int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb,
struct xfs_buf **bpp, xfs_btnum_t btnum,
const struct xfs_buf_ops *ops);
#define for_each_xrep_extent_safe(rbe, n, exlist) \
list_for_each_entry_safe((rbe), (n), &(exlist)->list, list)
-int xrep_collect_btree_extent(struct xfs_scrub_context *sc,
+int xrep_collect_btree_extent(struct xfs_scrub *sc,
struct xrep_extent_list *btlist, xfs_fsblock_t fsbno,
xfs_extlen_t len);
-void xrep_cancel_btree_extents(struct xfs_scrub_context *sc,
+void xrep_cancel_btree_extents(struct xfs_scrub *sc,
struct xrep_extent_list *btlist);
-int xrep_subtract_extents(struct xfs_scrub_context *sc,
+int xrep_subtract_extents(struct xfs_scrub *sc,
struct xrep_extent_list *exlist,
struct xrep_extent_list *sublist);
-int xrep_fix_freelist(struct xfs_scrub_context *sc, bool can_shrink);
-int xrep_invalidate_blocks(struct xfs_scrub_context *sc,
+int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
+int xrep_invalidate_blocks(struct xfs_scrub *sc,
struct xrep_extent_list *btlist);
-int xrep_reap_btree_extents(struct xfs_scrub_context *sc,
+int xrep_reap_btree_extents(struct xfs_scrub *sc,
struct xrep_extent_list *exlist,
struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
unsigned int height;
};
-int xrep_find_ag_btree_roots(struct xfs_scrub_context *sc,
+int xrep_find_ag_btree_roots(struct xfs_scrub *sc,
struct xfs_buf *agf_bp,
struct xrep_find_ag_btree *btree_info,
struct xfs_buf *agfl_bp);
-void xrep_force_quotacheck(struct xfs_scrub_context *sc, uint dqtype);
-int xrep_ino_dqattach(struct xfs_scrub_context *sc);
+void xrep_force_quotacheck(struct xfs_scrub *sc, uint dqtype);
+int xrep_ino_dqattach(struct xfs_scrub *sc);
/* Metadata repairers */
-int xrep_probe(struct xfs_scrub_context *sc);
-int xrep_superblock(struct xfs_scrub_context *sc);
+int xrep_probe(struct xfs_scrub *sc);
+int xrep_superblock(struct xfs_scrub *sc);
#else
static inline int xrep_attempt(
struct xfs_inode *ip,
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
bool *fixed)
{
return -EOPNOTSUPP;
static inline xfs_extlen_t
xrep_calc_ag_resblks(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
ASSERT(!(sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR));
return 0;
*/
int
xchk_setup_ag_rmapbt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
return xchk_setup_ag_btree(sc, ip, false);
/* Cross-reference a rmap against the refcount btree. */
STATIC void
xchk_rmapbt_xref_refc(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_rmap_irec *irec)
{
xfs_agblock_t fbno;
/* Cross-reference with the other btrees. */
STATIC void
xchk_rmapbt_xref(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_rmap_irec *irec)
{
xfs_agblock_t agbno = irec->rm_startblock;
/* Scrub the rmap btree for some AG. */
int
xchk_rmapbt(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_owner_info oinfo;
/* xref check that the extent is owned by a given owner */
static inline void
xchk_xref_check_owner(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t bno,
xfs_extlen_t len,
struct xfs_owner_info *oinfo,
/* xref check that the extent is owned by a given owner */
void
xchk_xref_is_owned_by(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t bno,
xfs_extlen_t len,
struct xfs_owner_info *oinfo)
/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t bno,
xfs_extlen_t len,
struct xfs_owner_info *oinfo)
/* xref check that the extent has no reverse mapping at all */
void
xchk_xref_has_no_owner(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_agblock_t bno,
xfs_extlen_t len)
{
/* Set us up with the realtime metadata locked. */
int
xchk_setup_rt(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
int error;
struct xfs_rtalloc_rec *rec,
void *priv)
{
- struct xfs_scrub_context *sc = priv;
+ struct xfs_scrub *sc = priv;
xfs_rtblock_t startblock;
xfs_rtblock_t blockcount;
/* Scrub the realtime bitmap. */
int
xchk_rtbitmap(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
int error;
/* Scrub the realtime summary. */
int
xchk_rtsummary(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_inode *rsumip = sc->mp->m_rsumip;
struct xfs_inode *old_ip = sc->ip;
/* xref check that the extent is not free in the rtbitmap */
void
xchk_xref_is_used_rt_space(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
xfs_rtblock_t fsbno,
xfs_extlen_t len)
{
*/
static int
xchk_probe(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
int error = 0;
/* Free all the resources and finish the transactions. */
STATIC int
xchk_teardown(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip_in,
int error)
{
}
#ifdef CONFIG_XFS_ONLINE_REPAIR
-static inline void xchk_postmortem(struct xfs_scrub_context *sc)
+static inline void xchk_postmortem(struct xfs_scrub *sc)
{
/*
* Userspace asked us to repair something, we repaired it, rescanned
xrep_failure(sc->mp);
}
#else
-static inline void xchk_postmortem(struct xfs_scrub_context *sc)
+static inline void xchk_postmortem(struct xfs_scrub *sc)
{
/*
* Userspace asked us to scrub something, it's broken, and we have no
struct xfs_inode *ip,
struct xfs_scrub_metadata *sm)
{
- struct xfs_scrub_context sc;
+ struct xfs_scrub sc;
struct xfs_mount *mp = ip->i_mount;
bool try_harder = false;
bool already_fixed = false;
#ifndef __XFS_SCRUB_SCRUB_H__
#define __XFS_SCRUB_SCRUB_H__
-struct xfs_scrub_context;
+struct xfs_scrub;
/* Type info and names for the scrub types. */
enum xchk_type {
struct xchk_meta_ops {
/* Acquire whatever resources are needed for the operation. */
- int (*setup)(struct xfs_scrub_context *,
+ int (*setup)(struct xfs_scrub *,
struct xfs_inode *);
/* Examine metadata for errors. */
- int (*scrub)(struct xfs_scrub_context *);
+ int (*scrub)(struct xfs_scrub *);
/* Repair or optimize the metadata. */
- int (*repair)(struct xfs_scrub_context *);
+ int (*repair)(struct xfs_scrub *);
/* Decide if we even have this piece of metadata. */
bool (*has)(struct xfs_sb *);
struct xfs_btree_cur *refc_cur;
};
-struct xfs_scrub_context {
+struct xfs_scrub {
/* General scrub state. */
struct xfs_mount *mp;
struct xfs_scrub_metadata *sm;
};
/* Metadata scrubbers */
-int xchk_tester(struct xfs_scrub_context *sc);
-int xchk_superblock(struct xfs_scrub_context *sc);
-int xchk_agf(struct xfs_scrub_context *sc);
-int xchk_agfl(struct xfs_scrub_context *sc);
-int xchk_agi(struct xfs_scrub_context *sc);
-int xchk_bnobt(struct xfs_scrub_context *sc);
-int xchk_cntbt(struct xfs_scrub_context *sc);
-int xchk_inobt(struct xfs_scrub_context *sc);
-int xchk_finobt(struct xfs_scrub_context *sc);
-int xchk_rmapbt(struct xfs_scrub_context *sc);
-int xchk_refcountbt(struct xfs_scrub_context *sc);
-int xchk_inode(struct xfs_scrub_context *sc);
-int xchk_bmap_data(struct xfs_scrub_context *sc);
-int xchk_bmap_attr(struct xfs_scrub_context *sc);
-int xchk_bmap_cow(struct xfs_scrub_context *sc);
-int xchk_directory(struct xfs_scrub_context *sc);
-int xchk_xattr(struct xfs_scrub_context *sc);
-int xchk_symlink(struct xfs_scrub_context *sc);
-int xchk_parent(struct xfs_scrub_context *sc);
+int xchk_tester(struct xfs_scrub *sc);
+int xchk_superblock(struct xfs_scrub *sc);
+int xchk_agf(struct xfs_scrub *sc);
+int xchk_agfl(struct xfs_scrub *sc);
+int xchk_agi(struct xfs_scrub *sc);
+int xchk_bnobt(struct xfs_scrub *sc);
+int xchk_cntbt(struct xfs_scrub *sc);
+int xchk_inobt(struct xfs_scrub *sc);
+int xchk_finobt(struct xfs_scrub *sc);
+int xchk_rmapbt(struct xfs_scrub *sc);
+int xchk_refcountbt(struct xfs_scrub *sc);
+int xchk_inode(struct xfs_scrub *sc);
+int xchk_bmap_data(struct xfs_scrub *sc);
+int xchk_bmap_attr(struct xfs_scrub *sc);
+int xchk_bmap_cow(struct xfs_scrub *sc);
+int xchk_directory(struct xfs_scrub *sc);
+int xchk_xattr(struct xfs_scrub *sc);
+int xchk_symlink(struct xfs_scrub *sc);
+int xchk_parent(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
-int xchk_rtbitmap(struct xfs_scrub_context *sc);
-int xchk_rtsummary(struct xfs_scrub_context *sc);
+int xchk_rtbitmap(struct xfs_scrub *sc);
+int xchk_rtsummary(struct xfs_scrub *sc);
#else
static inline int
-xchk_rtbitmap(struct xfs_scrub_context *sc)
+xchk_rtbitmap(struct xfs_scrub *sc)
{
return -ENOENT;
}
static inline int
-xchk_rtsummary(struct xfs_scrub_context *sc)
+xchk_rtsummary(struct xfs_scrub *sc)
{
return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
-int xchk_quota(struct xfs_scrub_context *sc);
+int xchk_quota(struct xfs_scrub *sc);
#else
static inline int
-xchk_quota(struct xfs_scrub_context *sc)
+xchk_quota(struct xfs_scrub *sc)
{
return -ENOENT;
}
#endif
/* cross-referencing helpers */
-void xchk_xref_is_used_space(struct xfs_scrub_context *sc,
+void xchk_xref_is_used_space(struct xfs_scrub *sc,
xfs_agblock_t agbno, xfs_extlen_t len);
-void xchk_xref_is_not_inode_chunk(struct xfs_scrub_context *sc,
+void xchk_xref_is_not_inode_chunk(struct xfs_scrub *sc,
xfs_agblock_t agbno, xfs_extlen_t len);
-void xchk_xref_is_inode_chunk(struct xfs_scrub_context *sc,
+void xchk_xref_is_inode_chunk(struct xfs_scrub *sc,
xfs_agblock_t agbno, xfs_extlen_t len);
-void xchk_xref_is_owned_by(struct xfs_scrub_context *sc,
+void xchk_xref_is_owned_by(struct xfs_scrub *sc,
xfs_agblock_t agbno, xfs_extlen_t len,
struct xfs_owner_info *oinfo);
-void xchk_xref_is_not_owned_by(struct xfs_scrub_context *sc,
+void xchk_xref_is_not_owned_by(struct xfs_scrub *sc,
xfs_agblock_t agbno, xfs_extlen_t len,
struct xfs_owner_info *oinfo);
-void xchk_xref_has_no_owner(struct xfs_scrub_context *sc,
+void xchk_xref_has_no_owner(struct xfs_scrub *sc,
xfs_agblock_t agbno, xfs_extlen_t len);
-void xchk_xref_is_cow_staging(struct xfs_scrub_context *sc,
+void xchk_xref_is_cow_staging(struct xfs_scrub *sc,
xfs_agblock_t bno, xfs_extlen_t len);
-void xchk_xref_is_not_shared(struct xfs_scrub_context *sc,
+void xchk_xref_is_not_shared(struct xfs_scrub *sc,
xfs_agblock_t bno, xfs_extlen_t len);
#ifdef CONFIG_XFS_RT
-void xchk_xref_is_used_rt_space(struct xfs_scrub_context *sc,
+void xchk_xref_is_used_rt_space(struct xfs_scrub *sc,
xfs_rtblock_t rtbno, xfs_extlen_t len);
#else
# define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
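/*
 * Illustration only, not part of the patch: a hypothetical dispatch-table
 * entry showing how the setup and scrub hooks declared above line up with
 * the function-pointer types in struct xchk_meta_ops once the context
 * structure is shortened to struct xfs_scrub.  "example_ops" is an invented
 * name; the real dispatch table is not shown in these hunks.
 */
static const struct xchk_meta_ops example_ops = {
	.setup	= xchk_setup_fs,	/* int (*setup)(struct xfs_scrub *, struct xfs_inode *) */
	.scrub	= xchk_superblock,	/* int (*scrub)(struct xfs_scrub *) */
};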
/* Set us up to scrub a symbolic link. */
int
xchk_setup_symlink(
- struct xfs_scrub_context *sc,
+ struct xfs_scrub *sc,
struct xfs_inode *ip)
{
/* Allocate the buffer without the inode lock held. */
int
xchk_symlink(
- struct xfs_scrub_context *sc)
+ struct xfs_scrub *sc)
{
struct xfs_inode *ip = sc->ip;
struct xfs_ifork *ifp;
DEFINE_SCRUB_EVENT(xrep_done);
TRACE_EVENT(xchk_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+ TP_PROTO(struct xfs_scrub *sc, xfs_agnumber_t agno,
xfs_agblock_t bno, int error, void *ret_ip),
TP_ARGS(sc, agno, bno, error, ret_ip),
TP_STRUCT__entry(
);
TRACE_EVENT(xchk_file_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
+ TP_PROTO(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset, int error, void *ret_ip),
TP_ARGS(sc, whichfork, offset, error, ret_ip),
TP_STRUCT__entry(
);
DECLARE_EVENT_CLASS(xchk_block_error_class,
- TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, void *ret_ip),
+ TP_PROTO(struct xfs_scrub *sc, xfs_daddr_t daddr, void *ret_ip),
TP_ARGS(sc, daddr, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
#define DEFINE_SCRUB_BLOCK_ERROR_EVENT(name) \
DEFINE_EVENT(xchk_block_error_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, \
+ TP_PROTO(struct xfs_scrub *sc, xfs_daddr_t daddr, \
void *ret_ip), \
TP_ARGS(sc, daddr, ret_ip))
DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_preen);
DECLARE_EVENT_CLASS(xchk_ino_error_class,
- TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, void *ret_ip),
+ TP_PROTO(struct xfs_scrub *sc, xfs_ino_t ino, void *ret_ip),
TP_ARGS(sc, ino, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
#define DEFINE_SCRUB_INO_ERROR_EVENT(name) \
DEFINE_EVENT(xchk_ino_error_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, \
+ TP_PROTO(struct xfs_scrub *sc, xfs_ino_t ino, \
void *ret_ip), \
TP_ARGS(sc, ino, ret_ip))
DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_warning);
DECLARE_EVENT_CLASS(xchk_fblock_error_class,
- TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
+ TP_PROTO(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset, void *ret_ip),
TP_ARGS(sc, whichfork, offset, ret_ip),
TP_STRUCT__entry(
#define DEFINE_SCRUB_FBLOCK_ERROR_EVENT(name) \
DEFINE_EVENT(xchk_fblock_error_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, int whichfork, \
+ TP_PROTO(struct xfs_scrub *sc, int whichfork, \
xfs_fileoff_t offset, void *ret_ip), \
TP_ARGS(sc, whichfork, offset, ret_ip))
DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xchk_fblock_warning);
TRACE_EVENT(xchk_incomplete,
- TP_PROTO(struct xfs_scrub_context *sc, void *ret_ip),
+ TP_PROTO(struct xfs_scrub *sc, void *ret_ip),
TP_ARGS(sc, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
);
TRACE_EVENT(xchk_btree_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, int error, void *ret_ip),
TP_ARGS(sc, cur, level, error, ret_ip),
TP_STRUCT__entry(
);
TRACE_EVENT(xchk_ifork_btree_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, int error, void *ret_ip),
TP_ARGS(sc, cur, level, error, ret_ip),
TP_STRUCT__entry(
);
TRACE_EVENT(xchk_btree_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, void *ret_ip),
TP_ARGS(sc, cur, level, ret_ip),
TP_STRUCT__entry(
);
TRACE_EVENT(xchk_ifork_btree_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, void *ret_ip),
TP_ARGS(sc, cur, level, ret_ip),
TP_STRUCT__entry(
);
DECLARE_EVENT_CLASS(xchk_sbtree_class,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level),
TP_ARGS(sc, cur, level),
TP_STRUCT__entry(
)
#define DEFINE_SCRUB_SBTREE_EVENT(name) \
DEFINE_EVENT(xchk_sbtree_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, \
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur, \
int level), \
TP_ARGS(sc, cur, level))
DEFINE_SCRUB_SBTREE_EVENT(xchk_btree_key);
TRACE_EVENT(xchk_xref_error,
- TP_PROTO(struct xfs_scrub_context *sc, int error, void *ret_ip),
+ TP_PROTO(struct xfs_scrub *sc, int error, void *ret_ip),
TP_ARGS(sc, error, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)