rrec->rm_offset = 0;
/* account for refc btree root */
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (xfs_has_reflink(mp)) {
rrec = XFS_RMAP_REC_ADDR(block, 5);
rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
rrec->rm_blockcount = cpu_to_be32(1);
agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
- if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ if (xfs_has_rmapbt(mp)) {
agf->agf_roots[XFS_BTNUM_RMAPi] =
cpu_to_be32(XFS_RMAP_BLOCK(mp));
agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
agf->agf_freeblks = cpu_to_be32(tmpsize);
agf->agf_longest = cpu_to_be32(tmpsize);
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (xfs_has_reflink(mp)) {
agf->agf_refcount_root = cpu_to_be32(
xfs_refc_block(mp));
agf->agf_refcount_level = cpu_to_be32(1);
__be32 *agfl_bno;
int bucket;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
agfl->agfl_seqno = cpu_to_be32(id->agno);
uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
agi->agi_freecount = 0;
agi->agi_newino = cpu_to_be32(NULLAGINO);
agi->agi_dirino = cpu_to_be32(NULLAGINO);
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
- if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
+ if (xfs_has_finobt(mp)) {
agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
agi->agi_free_level = cpu_to_be32(1);
}
.ops = &xfs_finobt_buf_ops,
.work = &xfs_btroot_init,
.type = XFS_BTNUM_FINO,
- .need_init = xfs_sb_version_hasfinobt(&mp->m_sb)
+ .need_init = xfs_has_finobt(mp)
},
{ /* RMAP root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_rmapbt_buf_ops,
.work = &xfs_rmaproot_init,
- .need_init = xfs_sb_version_hasrmapbt(&mp->m_sb)
+ .need_init = xfs_has_rmapbt(mp)
},
{ /* REFC root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
.ops = &xfs_refcountbt_buf_ops,
.work = &xfs_btroot_init,
.type = XFS_BTNUM_REFC,
- .need_init = xfs_sb_version_hasreflink(&mp->m_sb)
+ .need_init = xfs_has_reflink(mp)
},
{ /* NULL terminating block */
.daddr = XFS_BUF_DADDR_NULL,
{
unsigned int size = mp->m_sb.sb_sectsize;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
size -= sizeof(struct xfs_agfl);
return size / sizeof(xfs_agblock_t);
xfs_refc_block(
struct xfs_mount *mp)
{
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_has_rmapbt(mp))
return XFS_RMAP_BLOCK(mp) + 1;
- if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ if (xfs_has_finobt(mp))
return XFS_FIBT_BLOCK(mp) + 1;
return XFS_IBT_BLOCK(mp) + 1;
}
xfs_prealloc_blocks(
struct xfs_mount *mp)
{
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
return xfs_refc_block(mp) + 1;
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_has_rmapbt(mp))
return XFS_RMAP_BLOCK(mp) + 1;
- if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ if (xfs_has_finobt(mp))
return XFS_FIBT_BLOCK(mp) + 1;
return XFS_IBT_BLOCK(mp) + 1;
}
blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
blocks += XFS_ALLOC_AGFL_RESERVE;
blocks += 3; /* AGF, AGI btree root blocks */
- if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ if (xfs_has_finobt(mp))
blocks++; /* finobt root block */
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_has_rmapbt(mp))
blocks++; /* rmap root block */
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
blocks++; /* refcount root block */
return mp->m_sb.sb_agblocks - blocks;
* AGFL is what the AGF says is active. We can't get to the AGF, so we
* can't verify just those entries are valid.
*/
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return NULL;
if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
* AGFL is what the AGF says is active. We can't get to the AGF, so we
* can't verify just those entries are valid.
*/
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
xfs_failaddr_t fa;
/* no verification of non-crc AGFLs */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
fa = xfs_agfl_verify(bp);
int active;
/* no agfl header on v4 supers */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return false;
/*
struct xfs_mount *mp = bp->b_mount;
struct xfs_agf *agf = bp->b_addr;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > mp->m_ag_maxlevels)
return __this_address;
- if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
+ if (xfs_has_rmapbt(mp) &&
(be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > mp->m_rmap_maxlevels))
return __this_address;
struct xfs_mount *mp = bp->b_mount;
xfs_failaddr_t fa;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
!xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
xfs_verifier_error(bp, -EFSBADCRC, __this_address);
else {
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (bip)
atomic_inc(&pag->pag_ref);
cur->bc_ag.pag = pag;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
return cur;
* Btree block header size depends on a superblock flag.
*/
#define XFS_ALLOC_BLOCK_LEN(mp) \
- (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+ (xfs_has_crc((mp)) ? \
XFS_BTREE_SBLOCK_CRC_LEN : XFS_BTREE_SBLOCK_LEN)
/*
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (bip)
struct xfs_mount *mp = bp->b_mount;
xfs_failaddr_t fa;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
!xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF))
xfs_verifier_error(bp, -EFSBADCRC, __this_address);
else {
xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
{
if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
- !(xfs_sb_version_hasattr2(&mp->m_sb))) {
+ !(xfs_has_attr2(mp))) {
spin_lock(&mp->m_sb_lock);
- if (!xfs_sb_version_hasattr2(&mp->m_sb)) {
- xfs_sb_version_addattr2(&mp->m_sb);
+ if (!xfs_has_attr2(mp)) {
+ xfs_add_attr2(mp);
spin_unlock(&mp->m_sb_lock);
xfs_log_sb(tp);
} else
xfs_trans_buf_set_type(args->trans, bp2, XFS_BLFT_ATTR_LEAF_BUF);
bp2->b_ops = bp1->b_ops;
memcpy(bp2->b_addr, bp1->b_addr, args->geo->blksize);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_da3_blkinfo *hdr3 = bp2->b_addr;
hdr3->blkno = cpu_to_be64(bp2->b_bn);
}
memset(&ichdr, 0, sizeof(ichdr));
ichdr.firstused = args->geo->blksize;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_da3_blkinfo *hdr3 = bp->b_addr;
ichdr.magic = XFS_ATTR3_LEAF_MAGIC;
struct xfs_mount *mp,
int attrlen)
{
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
return (attrlen + buflen - 1) / buflen;
}
int blksize = mp->m_attr_geo->blksize;
/* no verification of non-crc buffers */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return 0;
ptr = bp->b_addr;
xfs_daddr_t bno;
/* no verification of non-crc buffers */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
ptr = bp->b_addr;
{
struct xfs_attr3_rmt_hdr *rmt = ptr;
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return 0;
rmt->rm_magic = cpu_to_be32(XFS_ATTR3_RMT_MAGIC);
byte_cnt = min(*valuelen, byte_cnt);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (xfs_attr3_rmt_hdr_ok(src, ino, *offset,
byte_cnt, bno)) {
xfs_alert(mp,
xfs_trans_log_inode(tp, ip, logflags);
if (error)
goto trans_cancel;
- if (!xfs_sb_version_hasattr(&mp->m_sb) ||
- (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
+ if (!xfs_has_attr(mp) ||
+ (!xfs_has_attr2(mp) && version == 2)) {
bool log_sb = false;
spin_lock(&mp->m_sb_lock);
- if (!xfs_sb_version_hasattr(&mp->m_sb)) {
- xfs_sb_version_addattr(&mp->m_sb);
+ if (!xfs_has_attr(mp)) {
+ xfs_add_attr(mp);
log_sb = true;
}
- if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
- xfs_sb_version_addattr2(&mp->m_sb);
+ if (!xfs_has_attr2(mp) && version == 2) {
+ xfs_add_attr2(mp);
log_sb = true;
}
spin_unlock(&mp->m_sb_lock);
xfs_bmbt_key_t *tkp;
__be64 *tpp;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
&mp->m_sb.sb_meta_uuid));
cur->bc_ops = &xfs_bmbt_ops;
cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_ino.forksize = XFS_IFORK_SIZE(ip, whichfork);
* Btree block header size depends on a superblock flag.
*/
#define XFS_BMBT_BLOCK_LEN(mp) \
- (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+ (xfs_has_crc((mp)) ? \
XFS_BTREE_LBLOCK_CRC_LEN : XFS_BTREE_LBLOCK_LEN)
#define XFS_BMBT_REC_ADDR(mp, block, index) \
{
struct xfs_mount *mp = cur->bc_mp;
xfs_btnum_t btnum = cur->bc_btnum;
- int crc = xfs_sb_version_hascrc(&mp->m_sb);
+ int crc = xfs_has_crc(mp);
if (crc) {
if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
{
struct xfs_mount *mp = cur->bc_mp;
xfs_btnum_t btnum = cur->bc_btnum;
- int crc = xfs_sb_version_hascrc(&mp->m_sb);
+ int crc = xfs_has_crc(mp);
if (crc) {
if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_mount *mp = bp->b_mount;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn)))
return false;
return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_mount *mp = bp->b_mount;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
return false;
return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
__u64 owner,
unsigned int flags)
{
- int crc = xfs_sb_version_hascrc(&mp->m_sb);
+ int crc = xfs_has_crc(mp);
__u32 magic = xfs_btree_magic(crc, btnum);
buf->bb_magic = cpu_to_be32(magic);
struct xfs_mount *mp = bp->b_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return __this_address;
if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_perag *pag = bp->b_pag;
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return __this_address;
if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (bip)
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
node = bp->b_addr;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
#define XFS_ATTR3_RMT_CRC_OFF offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize) \
- ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
+ ((bufsize) - (xfs_has_crc((mp)) ? \
sizeof(struct xfs_attr3_rmt_hdr) : 0))
/* Number of bytes in a directory block. */
struct xfs_mount *mp = bp->b_mount;
xfs_failaddr_t fa;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF))
xfs_verifier_error(bp, -EFSBADCRC, __this_address);
else {
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (bip)
bp->b_ops = &xfs_dir3_block_buf_ops;
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_BLOCK_BUF);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
memset(hdr3, 0, sizeof(*hdr3));
hdr3->magic = cpu_to_be32(XFS_DIR3_BLOCK_MAGIC);
hdr3->blkno = cpu_to_be64(bp->b_bn);
struct xfs_mount *mp = bp->b_mount;
xfs_failaddr_t fa;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF))
xfs_verifier_error(bp, -EFSBADCRC, __this_address);
else {
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (bip)
* Initialize the header.
*/
hdr = bp->b_addr;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
memset(hdr3, 0, sizeof(*hdr3));
struct xfs_mount *mp = bp->b_mount;
xfs_failaddr_t fa;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_LEAF_CRC_OFF))
xfs_verifier_error(bp, -EFSBADCRC, __this_address);
else {
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (bip)
ASSERT(type == XFS_DIR2_LEAF1_MAGIC || type == XFS_DIR2_LEAFN_MAGIC);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
memset(leaf3, 0, sizeof(*leaf3));
if (!xfs_verify_magic(bp, hdr->magic))
return __this_address;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
struct xfs_mount *mp = bp->b_mount;
xfs_failaddr_t fa;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_FREE_CRC_OFF))
xfs_verifier_error(bp, -EFSBADCRC, __this_address);
else {
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (bip)
firstdb = (xfs_dir2_da_to_db(mp->m_dir_geo, fbno) -
xfs_dir2_byte_to_db(mp->m_dir_geo, XFS_DIR2_FREE_OFFSET)) *
maxbests;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
if (be32_to_cpu(hdr3->firstdb) != firstdb)
memset(bp->b_addr, 0, sizeof(struct xfs_dir3_free_hdr));
memset(&hdr, 0, sizeof(hdr));
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
hdr.magic = XFS_DIR3_FREE_MAGIC;
* if there is a filetype field, add the extra byte to the namelen
* for each entry that we see.
*/
- has_ftype = xfs_sb_version_hasftype(&mp->m_sb) ? 1 : 0;
+ has_ftype = xfs_has_ftype(mp) ? 1 : 0;
count = i8count = namelen = 0;
btp = xfs_dir2_block_tail_p(geo, hdr);
struct xfs_dqblk *dqb,
xfs_dqid_t id) /* used only during quotacheck */
{
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
!uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
dqb->dd_diskdq.d_type = type;
dqb->dd_diskdq.d_id = cpu_to_be32(id);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
XFS_DQUOT_CRC_OFF);
int ndquots;
int i;
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return true;
/*
sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;
}
-static inline bool xfs_sb_version_hasprojid32bit(struct xfs_sb *sbp)
+static inline bool xfs_sb_version_hasprojid32(struct xfs_sb *sbp)
{
return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
(xfs_sb_version_hasmorebits(sbp) &&
return version == 1 || version == 2;
}
-static inline bool xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
+static inline bool xfs_sb_version_haspquotino(struct xfs_sb *sbp)
{
return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
}
#define XFS_SYMLINK_MAPS 3
#define XFS_SYMLINK_BUF_SPACE(mp, bufsize) \
- ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
+ ((bufsize) - (xfs_has_crc((mp)) ? \
sizeof(struct xfs_dsymlink_hdr) : 0))
typedef __be32 xfs_rmap_ptr_t;
#define XFS_RMAP_BLOCK(mp) \
- (xfs_sb_version_hasfinobt(&((mp)->m_sb)) ? \
+ (xfs_has_finobt((mp)) ? \
XFS_FIBT_BLOCK(mp) + 1 : \
XFS_IBT_BLOCK(mp) + 1)
* limited only by the maximum size of the xattr that stores the information.
*/
#define XFS_ACL_MAX_ENTRIES(mp) \
- (xfs_sb_version_hascrc(&mp->m_sb) \
+ (xfs_has_crc(mp) \
? (XFS_XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
sizeof(struct xfs_acl_entry) \
: 25)
union xfs_btree_rec rec;
rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
- if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
+ if (xfs_has_sparseinodes(cur->bc_mp)) {
rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
rec.inobt.ir_u.sp.ir_count = irec->ir_count;
rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
struct xfs_inobt_rec_incore *irec)
{
irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
- if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
+ if (xfs_has_sparseinodes(mp)) {
irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
irec->ir_count = rec->inobt.ir_u.sp.ir_count;
irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
struct xfs_agi *agi = bp->b_addr;
int i;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels)
return __this_address;
- if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
+ if (xfs_has_finobt(mp) &&
(be32_to_cpu(agi->agi_free_level) < 1 ||
be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels))
return __this_address;
struct xfs_mount *mp = bp->b_mount;
xfs_failaddr_t fa;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
!xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
xfs_verifier_error(bp, -EFSBADCRC, __this_address);
else {
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (bip)
int has;
int error;
- if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (!xfs_has_sparseinodes(mp))
return 0;
pag = xfs_perag_get(mp, agno);
union xfs_btree_rec *rec)
{
rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
- if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
+ if (xfs_has_sparseinodes(cur->bc_mp)) {
rec->inobt.ir_u.sp.ir_holemask =
cpu_to_be16(cur->bc_rec.i.ir_holemask);
rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
/* take a reference for the cursor */
xfs_extlen_t tree_len = 0;
int error;
- if (!xfs_sb_version_hasfinobt(&mp->m_sb))
+ if (!xfs_has_finobt(mp))
return 0;
if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
* Btree block header size depends on a superblock flag.
*/
#define XFS_INOBT_BLOCK_LEN(mp) \
- (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+ (xfs_has_crc((mp)) ? \
XFS_BTREE_SBLOCK_CRC_LEN : XFS_BTREE_SBLOCK_LEN)
/*
/* don't allow reflink/cowextsize if we don't have reflink */
if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
- !xfs_sb_version_hasreflink(&mp->m_sb))
+ !xfs_has_reflink(mp))
return __this_address;
/* only regular files get reflink */
if (dip->di_version < 3)
return;
- ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
+ ASSERT(xfs_has_crc(mp));
crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
XFS_DINODE_CRC_OFF);
dip->di_crc = xfs_end_cksum(crc);
hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
- if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
+ if (hint_flag && !xfs_has_reflink(mp))
return __this_address;
if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
#define XFS_MIN_LOG_FACTOR 3
#define XLOG_REC_SHIFT(log) \
- BTOBB(1 << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
+ BTOBB(1 << (xfs_has_logv2(log->l_mp) ? \
XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
#define XLOG_TOTAL_REC_SHIFT(log) \
- BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
+ BTOBB(XLOG_MAX_ICLOGS << (xfs_has_logv2(log->l_mp) ? \
XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
/* get lsn fields */
if (tres.tr_logcount > 1)
max_logres *= tres.tr_logcount;
- if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1)
+ if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
lsunit = BTOBB(mp->m_sb.sb_logsunit);
/*
if (!xfs_verify_magic(bp, block->bb_magic))
return __this_address;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return __this_address;
fa = xfs_btree_sblock_v5hdr_verify(bp);
if (fa)
xfs_extlen_t tree_len;
int error;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return 0;
error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
struct xfs_btree_cur *cur;
int error;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return 0;
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
struct xfs_btree_cur *cur;
int error;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return 0;
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
struct xfs_mount *mp,
int whichfork)
{
- return xfs_sb_version_hasrmapbt(&mp->m_sb) && whichfork != XFS_COW_FORK;
+ return xfs_has_rmapbt(mp) && whichfork != XFS_COW_FORK;
}
/*
if (!xfs_verify_magic(bp, block->bb_magic))
return __this_address;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return __this_address;
fa = xfs_btree_sblock_v5hdr_verify(bp);
if (fa)
* disallow reflinking when less than 10% of the per-AG metadata
* block reservation since the fallback is a regular file copy.
*/
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
else
mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
xfs_extlen_t tree_len;
int error;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return 0;
error = xfs_alloc_read_agf(mp, tp, pag->pag_agno, 0, &agbp);
return -EWRONGFS;
}
- if (xfs_sb_version_has_pquotino(sbp)) {
+ if (xfs_sb_version_haspquotino(sbp)) {
if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) {
xfs_notice(mp,
"Version 5 of Super block has XFS_OQUOTA bits.");
XFS_FSB_TO_B(mp, sbp->sb_width), 0, false))
return -EFSCORRUPTED;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_sb_version_hascrc(sbp) &&
sbp->sb_blocksize < XFS_MIN_CRC_BLOCKSIZE) {
xfs_notice(mp, "v5 SB sanity check failed");
return -EFSCORRUPTED;
* We need to do these manipulations only if we are working
* with an older version of the on-disk superblock.
*/
- if (xfs_sb_version_has_pquotino(sbp))
+ if (xfs_sb_version_haspquotino(sbp))
return;
if (sbp->sb_qflags & XFS_OQUOTA_ENFD)
uint16_t qflags = from->sb_qflags;
to->sb_uquotino = cpu_to_be64(from->sb_uquotino);
- if (xfs_sb_version_has_pquotino(from)) {
+ if (xfs_sb_version_haspquotino(from)) {
to->sb_qflags = cpu_to_be16(from->sb_qflags);
to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
to->sb_pquotino = cpu_to_be64(from->sb_pquotino);
if (error)
goto out_error;
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_sb_version_hascrc(&sb))
return;
if (bip)
geo->flags |= XFS_FSOP_GEOM_FLAGS_LAZYSB;
if (xfs_sb_version_hasattr2(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_ATTR2;
- if (xfs_sb_version_hasprojid32bit(sbp))
+ if (xfs_sb_version_hasprojid32(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_PROJID32;
if (xfs_sb_version_hascrc(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_V5SB;
{
struct xfs_dsymlink_hdr *dsl = bp->b_addr;
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return 0;
memset(dsl, 0, sizeof(struct xfs_dsymlink_hdr));
struct xfs_mount *mp = bp->b_mount;
struct xfs_dsymlink_hdr *dsl = bp->b_addr;
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return __this_address;
if (!xfs_verify_magic(bp, dsl->sl_magic))
return __this_address;
xfs_failaddr_t fa;
/* no verification of non-crc buffers */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
if (!xfs_buf_verify_cksum(bp, XFS_SYMLINK_CRC_OFF))
xfs_failaddr_t fa;
/* no verification of non-crc buffers */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
fa = xfs_symlink_verify(bp);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
- if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_has_crc(mp)) {
bp->b_ops = NULL;
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
uint blocks;
blocks = num_ops * 2 * (2 * mp->m_ag_maxlevels - 1);
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_has_rmapbt(mp))
blocks += num_ops * (2 * mp->m_rmap_maxlevels - 1);
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
blocks += num_ops * (2 * mp->m_refc_maxlevels - 1);
return blocks;
xfs_calc_finobt_res(
struct xfs_mount *mp)
{
- if (!xfs_sb_version_hasfinobt(&mp->m_sb))
+ if (!xfs_has_finobt(mp))
return 0;
return xfs_calc_inobt_res(mp);
* require a permanent reservation on space.
*/
resp->tr_write.tr_logres = xfs_calc_write_reservation(mp);
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT_REFLINK;
else
resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
resp->tr_write.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
resp->tr_itruncate.tr_logres = xfs_calc_itruncate_reservation(mp);
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
resp->tr_itruncate.tr_logcount =
XFS_ITRUNCATE_LOG_COUNT_REFLINK;
else
resp->tr_growrtalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
resp->tr_qm_dqalloc.tr_logres = xfs_calc_qm_dqalloc_reservation(mp);
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT_REFLINK;
else
resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
xfs_ino_t ino)
{
return ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
- (xfs_sb_version_hasquota(&mp->m_sb) &&
+ (xfs_has_quota(mp) &&
xfs_is_quota_inode(&mp->m_sb, ino));
}
(cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
xchk_block_set_corrupt(sc, bp);
- if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_has_crc(mp)) {
/* all v5 fields must be zero */
if (memchr_inv(&sb->sb_features_compat, 0,
sizeof(struct xfs_dsb) -
/* Don't care about sb_lsn */
}
- if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
+ if (xfs_has_metauuid(mp)) {
/* The metadata UUID must be the same for all supers */
if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
xchk_block_set_corrupt(sc, bp);
* No rmap cursor; we can't xref if we have the rmapbt feature.
* We also can't do it if we're missing the free space btree cursors.
*/
- if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
+ if ((xfs_has_rmapbt(mp) && !sc->sa.rmap_cur) ||
!sc->sa.bno_cur || !sc->sa.cnt_cur)
return;
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
- if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ if (xfs_has_rmapbt(mp)) {
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
if (!xfs_verify_agbno(mp, agno, agbno))
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
}
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (xfs_has_reflink(mp)) {
agbno = be32_to_cpu(agf->agf_refcount_root);
if (!xfs_verify_agbno(mp, agno, agbno))
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
- if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
+ if (xfs_has_finobt(mp)) {
agbno = be32_to_cpu(agi->agi_free_root);
if (!xfs_verify_agbno(mp, agno, agbno))
xchk_block_set_corrupt(sc, sc->sa.agi_bp);
return -EFSCORRUPTED;
/* We must find the refcountbt root if that feature is enabled. */
- if (xfs_sb_version_hasreflink(&sc->mp->m_sb) &&
+ if (xfs_has_reflink(sc->mp) &&
!xrep_check_btree_root(sc, &fab[XREP_AGF_REFCOUNTBT]))
return -EFSCORRUPTED;
agf->agf_flfirst = old_agf->agf_flfirst;
agf->agf_fllast = old_agf->agf_fllast;
agf->agf_flcount = old_agf->agf_flcount;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
/* Mark the incore AGF data stale until we're done fixing things. */
agf->agf_levels[XFS_BTNUM_RMAPi] =
cpu_to_be32(fab[XREP_AGF_RMAPBT].height);
- if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
+ if (xfs_has_reflink(sc->mp)) {
agf->agf_refcount_root =
cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].root);
agf->agf_refcount_level =
agf->agf_btreeblks = cpu_to_be32(btreeblks);
/* Update the AGF counters from the refcountbt. */
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (xfs_has_reflink(mp)) {
cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
sc->sa.pag);
error = xfs_btree_count_blocks(cur, &blocks);
int error;
/* We require the rmapbt to rebuild anything. */
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return -EOPNOTSUPP;
/*
int error;
/* We require the rmapbt to rebuild anything. */
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return -EOPNOTSUPP;
xbitmap_init(&agfl_extents);
return -EFSCORRUPTED;
/* We must find the finobt root if that feature is enabled. */
- if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
+ if (xfs_has_finobt(mp) &&
!xrep_check_btree_root(sc, &fab[XREP_AGI_FINOBT]))
return -EFSCORRUPTED;
sc->sa.pag->pag_agno));
agi->agi_newino = cpu_to_be32(NULLAGINO);
agi->agi_dirino = cpu_to_be32(NULLAGINO);
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
/* We don't know how to fix the unlinked list yet. */
agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);
- if (xfs_sb_version_hasfinobt(&sc->mp->m_sb)) {
+ if (xfs_has_finobt(sc->mp)) {
agi->agi_free_root = cpu_to_be32(fab[XREP_AGI_FINOBT].root);
agi->agi_free_level = cpu_to_be32(fab[XREP_AGI_FINOBT].height);
}
int error;
/* We require the rmapbt to rebuild anything. */
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return -EOPNOTSUPP;
/*
bitmap_zero(usedmap, mp->m_attr_geo->blksize);
/* Check all the padding. */
- if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
+ if (xfs_has_crc(ds->sc->mp)) {
struct xfs_attr3_leafblock *leaf = bp->b_addr;
if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
* Check the owners of the btree blocks up to the level below
* the root since the verifiers don't do that.
*/
- if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
+ if (xfs_has_crc(bs->cur->bc_mp) &&
bs->cur->bc_ptrs[0] == 1) {
for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
block = xfs_btree_get_block(bs->cur, i, &bp);
bool zero_size;
int error;
- if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
+ if (!xfs_has_rmapbt(sc->mp) ||
whichfork == XFS_COW_FORK ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return 0;
return error;
/* Look for incorrect shared blocks. */
- if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
+ if (xfs_has_reflink(sc->mp)) {
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
&shared);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
pmaxrecs = &ds->maxrecs[level];
/* We only started zeroing the header on v5 filesystems. */
- if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb) && hdr3->hdr.pad)
+ if (xfs_has_crc(ds->sc->mp) && hdr3->hdr.pad)
xchk_da_set_corrupt(ds, level);
/* Check the owner. */
- if (xfs_sb_version_hascrc(&ip->i_mount->m_sb)) {
+ if (xfs_has_crc(ip->i_mount)) {
owner = be64_to_cpu(hdr3->owner);
if (owner != ip->i_ino)
xchk_da_set_corrupt(ds, level);
int ino_dtype;
int error = 0;
- if (!xfs_sb_version_hasftype(&mp->m_sb)) {
+ if (!xfs_has_ftype(mp)) {
if (dtype != DT_UNKNOWN && dtype != DT_DIR)
xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
if (!strncmp(".", name, namelen)) {
/* If this is "." then check that the inum matches the dir. */
- if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
+ if (xfs_has_ftype(mp) && type != DT_DIR)
xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
checked_ftype = true;
* If this is ".." in the root inode, check that the inum
* matches this dir.
*/
- if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
+ if (xfs_has_ftype(mp) && type != DT_DIR)
xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
checked_ftype = true;
bestcount = be32_to_cpu(ltp->bestcount);
bestp = xfs_dir2_leaf_bests_p(ltp);
- if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
+ if (xfs_has_crc(sc->mp)) {
struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;
if (hdr3->pad != cpu_to_be32(0))
return error;
xchk_buffer_recheck(sc, bp);
- if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
+ if (xfs_has_crc(sc->mp)) {
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
if (hdr3->pad != cpu_to_be32(0))
int error;
if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
- (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
+ (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
xchk_skip_xref(sc->sm))
return;
/* reflink flag requires reflink feature */
if ((flags2 & XFS_DIFLAG2_REFLINK) &&
- !xfs_sb_version_hasreflink(&mp->m_sb))
+ !xfs_has_reflink(mp))
goto bad;
/* cowextsize flag is checked w.r.t. mode separately */
xchk_ino_set_corrupt(sc, ino);
if (dip->di_projid_hi != 0 &&
- !xfs_sb_version_hasprojid32bit(&mp->m_sb))
+ !xfs_has_projid32(mp))
xchk_ino_set_corrupt(sc, ino);
break;
default:
bool has_shared;
int error;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return;
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
* bnobt/cntbt or inobt/finobt as pairs.
*/
bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
inobt_sz = xfs_iallocbt_calc_size(mp, icount /
XFS_INODES_PER_HOLEMASK_BIT);
else
inobt_sz = xfs_iallocbt_calc_size(mp, icount /
XFS_INODES_PER_CHUNK);
- if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ if (xfs_has_finobt(mp))
inobt_sz *= 2;
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
else
refcbt_sz = 0;
- if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ if (xfs_has_rmapbt(mp)) {
/*
* Guess how many blocks we need to rebuild the rmapbt.
* For non-reflink filesystems we can't have more records than
* many rmaps there could be in the AG, so we start off with
* what we hope is a generous over-estimation.
*/
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
rmapbt_sz = xfs_rmapbt_calc_size(mp,
(unsigned long long)aglen * 2);
else
xfs_fsblock_t fsbno;
int error = 0;
- ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
+ ASSERT(xfs_has_rmapbt(sc->mp));
for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
ASSERT(sc->ip != NULL ||
*/
if (sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) {
error = -EOPNOTSUPP;
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
goto out;
error = -EROFS;
* If we have to use the (expensive) rmap swap method, we can
* handle any number of extents and any format.
*/
- if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
+ if (xfs_has_rmapbt(ip->i_mount))
return 0;
/*
* event of a crash. Set the owner change log flags now and leave the
* bmbt scan as the last step.
*/
- if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+ if (xfs_has_v3inodes(ip->i_mount)) {
if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
(*target_log_flags) |= XFS_ILOG_DOWNER;
if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
(*src_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
- ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
+ ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
(*src_log_flags & XFS_ILOG_DOWNER));
(*src_log_flags) |= XFS_ILOG_DBROOT;
break;
break;
case XFS_DINODE_FMT_BTREE:
(*target_log_flags) |= XFS_ILOG_DBROOT;
- ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
+ ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
(*target_log_flags & XFS_ILOG_DOWNER));
break;
}
* a block reservation because it's really just a remap operation
* performed with log redo items!
*/
- if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ if (xfs_has_rmapbt(mp)) {
int w = XFS_DATA_FORK;
uint32_t ipnext = ip->i_df.if_nextents;
uint32_t tipnext = tip->i_df.if_nextents;
src_log_flags = XFS_ILOG_CORE;
target_log_flags = XFS_ILOG_CORE;
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_has_rmapbt(mp))
error = xfs_swap_extent_rmap(&tp, ip, tip);
else
error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
}
/* Swap the cow forks. */
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (xfs_has_reflink(mp)) {
ASSERT(!ip->i_cowfp ||
ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
ASSERT(!tip->i_cowfp ||
* non-crc filesystems don't attach verifiers during
* log recovery, so don't warn for such filesystems.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
xfs_warn(mp,
"%s: no buf ops on daddr 0x%llx len %d",
__func__, bp->b_bn, bp->b_length);
struct xfs_mount *mp = bp->b_mount;
int idx;
- idx = xfs_sb_version_hascrc(&mp->m_sb);
+ idx = xfs_has_crc(mp);
if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
return false;
return dmagic == bp->b_ops->magic[idx];
struct xfs_mount *mp = bp->b_mount;
int idx;
- idx = xfs_sb_version_hascrc(&mp->m_sb);
+ idx = xfs_has_crc(mp);
if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
return false;
return dmagic == bp->b_ops->magic16[idx];
* occurs during recovery.
*/
if (bip->bli_flags & XFS_BLI_INODE_BUF) {
- if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
+ if (xfs_has_v3inodes(lip->li_mountp) ||
!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
xfs_log_item_in_current_chkpt(lip)))
bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
* inconsistent state resulting in verification failures. Hence for now
* just avoid the verification stage for non-crc filesystems
*/
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
* Post recovery validation only works properly on CRC enabled
* filesystems.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
bp->b_ops = &xfs_inode_buf_ops;
inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
uint16_t blft;
/* v4 filesystems always recover immediately */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
goto recover_immediately;
/*
* the relevant UUID in the superblock.
*/
lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
- if (xfs_sb_version_hasmetauuid(&mp->m_sb))
+ if (xfs_has_metauuid(mp))
uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
else
uuid = &((struct xfs_dsb *)blk)->sb_uuid;
struct xfs_mount *mp,
uint8_t filetype)
{
- if (!xfs_sb_version_hasftype(&mp->m_sb))
+ if (!xfs_has_ftype(mp))
return DT_UNKNOWN;
if (filetype >= XFS_DIR3_FT_MAX)
d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
d->dd_diskdq.d_id = cpu_to_be32(curid);
d->dd_diskdq.d_type = type;
- if (curid > 0 && xfs_sb_version_hasbigtime(&mp->m_sb))
+ if (curid > 0 && xfs_has_bigtime(mp))
d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
XFS_DQUOT_CRC_OFF);
* expect an exact match for user dquots and for non-root group and
* project dquots.
*/
- if (xfs_sb_version_hascrc(&dqp->q_mount->m_sb) ||
+ if (xfs_has_crc(dqp->q_mount) ||
dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
return ddqp_type == dqp_type;
/* bigtime flag should never be set on root dquots */
if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
- if (!xfs_sb_version_hasbigtime(&dqp->q_mount->m_sb))
+ if (!xfs_has_bigtime(dqp->q_mount))
return __this_address;
if (dqp->q_id == 0)
return __this_address;
* buffer always has a valid CRC. This ensures there is no possibility
* of a dquot without an up-to-date CRC getting to disk.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
XFS_DQUOT_CRC_OFF);
* If the dquot has an LSN in it, recover the dquot only if it's less
* than the lsn of the transaction we are replaying.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
}
memcpy(ddq, recddq, item->ri_buf[1].i_len);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
XFS_DQUOT_CRC_OFF);
}
if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
return -EINVAL;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return -EOPNOTSUPP;
if (XFS_FORCED_SHUTDOWN(mp))
int error;
*stat = false;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return 0;
/* rt files will have no perag structure */
if (!info->pag)
return -EINVAL;
use_rmap = capable(CAP_SYS_ADMIN) &&
- xfs_sb_version_hasrmapbt(&mp->m_sb);
+ xfs_has_rmapbt(mp);
head->fmh_entries = 0;
/* Set up our device handlers. */
* particularly important for shrink because the write verifier
* will fail if sb_fdblocks is ever larger than sb_dblocks.
*/
- if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+ if (xfs_has_lazysbcount(mp))
xfs_log_sb(tp);
xfs_trans_set_sync(tp);
* value and hence we must also read the inode off disk even when
* initializing new inodes.
*/
- if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
+ if (xfs_has_v3inodes(mp) &&
(flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
VFS_I(ip)->i_generation = prandom_u32();
} else {
di_flags |= XFS_DIFLAG_PROJINHERIT;
} else if (S_ISREG(mode)) {
if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
- xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
+ xfs_has_realtime(ip->i_mount))
di_flags |= XFS_DIFLAG_REALTIME;
if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
di_flags |= XFS_DIFLAG_EXTSIZE;
ip->i_extsize = 0;
ip->i_diflags = 0;
- if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
+ if (xfs_has_v3inodes(mp)) {
inode_set_iversion(inode, 1);
ip->i_cowextsize = 0;
ip->i_crtime = tv;
* this saves us from needing to run a separate transaction to set the
* fork offset in the immediate future.
*/
- if (init_xattrs && xfs_sb_version_hasattr(&mp->m_sb)) {
+ if (init_xattrs && xfs_has_attr(mp)) {
ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
}
* happen but we need to still do it to ensure backwards compatibility
* with old kernels that predate logging all inode changes.
*/
- if (!xfs_sb_version_has_v3inode(&mp->m_sb))
+ if (!xfs_has_v3inodes(mp))
ip->i_flushiter++;
/*
xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
/* Wrap, we never let the log put out DI_MAX_FLUSH */
- if (!xfs_sb_version_has_v3inode(&mp->m_sb)) {
+ if (!xfs_has_v3inodes(mp)) {
if (ip->i_flushiter == DI_MAX_FLUSH)
ip->i_flushiter = 0;
}
/* log a dummy value to ensure log structure is fully initialised */
to->di_next_unlinked = NULLAGINO;
- if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+ if (xfs_has_v3inodes(ip->i_mount)) {
to->di_version = 3;
to->di_changecount = inode_peek_iversion(inode);
to->di_crtime = xfs_inode_to_log_dinode_ts(ip, ip->i_crtime);
* superblock flag to determine whether we need to look at di_flushiter
* to skip replay when the on disk inode is newer than the log one
*/
- if (!xfs_sb_version_has_v3inode(&mp->m_sb) &&
+ if (!xfs_has_v3inodes(mp) &&
ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
/*
* Deal with the wrap case, DI_MAX_FLUSH is less
/* diflags2 only valid for v3 inodes. */
i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
- if (i_flags2 && !xfs_sb_version_has_v3inode(&mp->m_sb))
+ if (i_flags2 && !xfs_has_v3inodes(mp))
return -EINVAL;
ip->i_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
if (!fa->fsx_valid)
return 0;
- /* Disallow 32bit project ids if projid32bit feature is not enabled. */
+ /* Disallow 32bit project ids if 32bit IDs are not enabled. */
if (fa->fsx_projid > (uint16_t)-1 &&
- !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
+ !xfs_has_projid32(ip->i_mount))
return -EINVAL;
return 0;
}
else
ip->i_extsize = 0;
- if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
+ if (xfs_has_v3inodes(mp)) {
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
ip->i_cowextsize = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
else
* but still hashed. This is incompatible with case-insensitive
* mode, so invalidate (unhash) the dentry in CI-mode.
*/
- if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb))
+ if (xfs_has_asciici(XFS_M(dir->i_sb)))
d_invalidate(dentry);
return 0;
}
stat->ctime = inode->i_ctime;
stat->blocks = XFS_FSB_TO_BB(mp, ip->i_nblocks + ip->i_delayed_blks);
- if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
+ if (xfs_has_v3inodes(mp)) {
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
stat->btime = ip->i_crtime;
}
if (!gid_eq(igid, gid)) {
if (XFS_IS_GQUOTA_ON(mp)) {
- ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
+ ASSERT(xfs_has_pquotino(mp) ||
!XFS_IS_PQUOTA_ON(mp));
ASSERT(mask & ATTR_GID);
ASSERT(gdqp);
inode->i_mapping->a_ops = &xfs_address_space_operations;
break;
case S_IFDIR:
- if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
+ if (xfs_has_asciici(XFS_M(inode->i_sb)))
inode->i_op = &xfs_dir_ci_inode_operations;
else
inode->i_op = &xfs_dir_inode_operations;
buf->bs_forkoff = XFS_IFORK_BOFF(ip);
buf->bs_version = XFS_BULKSTAT_VERSION_V5;
- if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
+ if (xfs_has_v3inodes(mp)) {
buf->bs_btime = ip->i_crtime.tv_sec;
buf->bs_btime_nsec = ip->i_crtime.tv_nsec;
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
int num_bblks)
{
struct xlog *log;
- bool fatal = xfs_sb_version_hascrc(&mp->m_sb);
+ bool fatal = xfs_has_crc(mp);
int error = 0;
int min_logfsbs;
* handles this for us.
*/
need_covered = xfs_log_need_covered(mp);
- if (!need_covered && !xfs_sb_version_haslazysbcount(&mp->m_sb))
+ if (!need_covered && !xfs_has_lazysbcount(mp))
return 0;
/*
xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
- if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1)
+ if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
else
log->l_iclog_roundoff = BBSIZE;
xlog_grant_head_init(&log->l_write_head);
error = -EFSCORRUPTED;
- if (xfs_sb_version_hassector(&mp->m_sb)) {
+ if (xfs_has_sector(mp)) {
log2_size = mp->m_sb.sb_logsectlog;
if (log2_size < BBSHIFT) {
xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
/* for larger sector sizes, must have v2 or external log */
if (log2_size && log->l_logBBstart > 0 &&
- !xfs_sb_version_haslogv2(&mp->m_sb)) {
+ !xfs_has_logv2(mp)) {
xfs_warn(mp,
"log sector size (0x%x) invalid for configuration.",
log2_size);
memset(head, 0, sizeof(xlog_rec_header_t));
head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
head->h_version = cpu_to_be32(
- xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
+ xfs_has_logv2(log->l_mp) ? 2 : 1);
head->h_size = cpu_to_be32(log->l_iclog_size);
/* new fields */
head->h_fmt = cpu_to_be32(XLOG_FMT);
dp += BBSIZE;
}
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
+ if (xfs_has_logv2(log->l_mp)) {
xlog_in_core_2_t *xhdr = iclog->ic_data;
for ( ; i < BTOBB(size); i++) {
offsetof(struct xlog_rec_header, h_crc));
/* ... then for additional cycle data for v2 logs ... */
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
+ if (xfs_has_logv2(log->l_mp)) {
union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead;
int i;
int xheads;
/* real byte length */
size = iclog->ic_offset;
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb))
+ if (xfs_has_logv2(log->l_mp))
size += roundoff;
iclog->ic_header.h_len = cpu_to_be32(size);
static inline int
xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
{
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
+ if (xfs_has_logv2(log->l_mp)) {
int h_size = be32_to_cpu(rh->h_size);
if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
recp->h_cycle = cpu_to_be32(cycle);
recp->h_version = cpu_to_be32(
- xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
+ xfs_has_logv2(log->l_mp) ? 2 : 1);
recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
recp->h_fmt = cpu_to_be32(XLOG_FMT);
dp += BBSIZE;
}
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
+ if (xfs_has_logv2(log->l_mp)) {
xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
* the kernel from one that does not add CRCs by default.
*/
if (crc != old_crc) {
- if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
+ if (old_crc || xfs_has_crc(log->l_mp)) {
xfs_alert(log->l_mp,
"log record CRC mismatch: found 0x%x, expected 0x%x.",
le32_to_cpu(old_crc),
* If the filesystem is CRC enabled, this mismatch becomes a
* fatal log corruption failure.
*/
- if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
+ if (xfs_has_crc(log->l_mp)) {
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
}
* Read the header of the tail block and get the iclog buffer size from
* h_size. Use this to tell how many sectors make up the log header.
*/
- if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
+ if (xfs_has_logv2(log->l_mp)) {
/*
* When using variable length iclogs, read first sector of
* iclog header and extract the header size from it. Get a
* could not be verified. Check the superblock LSN against the current
* LSN now that it's known.
*/
- if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
+ if (xfs_has_crc(log->l_mp) &&
!xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
return -EINVAL;
}
}
- if (!xfs_sb_version_hasdalign(&mp->m_sb)) {
+ if (!xfs_has_dalign(mp)) {
xfs_warn(mp,
"cannot change alignment: superblock does not support data alignment");
return -EINVAL;
sbp->sb_width = mp->m_swidth;
mp->m_update_sb = true;
} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
- xfs_sb_version_hasdalign(&mp->m_sb)) {
+ xfs_has_dalign(mp)) {
mp->m_dalign = sbp->sb_unit;
mp->m_swidth = sbp->sb_width;
}
* superblock to be correct and we don't need to do anything here.
* Otherwise, recalculate the summary counters.
*/
- if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+ if ((!xfs_has_lazysbcount(mp) ||
XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
!xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
return 0;
/* always use v2 inodes by default now */
if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
+ mp->m_features |= XFS_FEAT_NLINK;
mp->m_update_sb = true;
}
* cluster size. Full inode chunk alignment must match the chunk size,
* but that is checked on sb read verification...
*/
- if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
+ if (xfs_has_sparseinodes(mp) &&
mp->m_sb.sb_spino_align !=
XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
xfs_warn(mp,
* behaviour specified by the superblock feature bit.
*/
if (!(mp->m_flags & (XFS_MOUNT_ATTR2|XFS_MOUNT_NOATTR2)) &&
- xfs_sb_version_hasattr2(&mp->m_sb))
+ xfs_has_attr2(mp))
mp->m_flags |= XFS_MOUNT_ATTR2;
/*
xfs_force_summary_recalc(
struct xfs_mount *mp)
{
- if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
+ if (!xfs_has_lazysbcount(mp))
return;
xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
__XFS_HAS_FEAT(sparseinodes, SPINODES)
__XFS_HAS_FEAT(metauuid, META_UUID)
__XFS_HAS_FEAT(realtime, REALTIME)
-__XFS_HAS_FEAT(inobtcounts, REALTIME)
-__XFS_HAS_FEAT(bigtime, REALTIME)
-__XFS_HAS_FEAT(needsrepair, REALTIME)
+__XFS_HAS_FEAT(inobtcounts, INOBTCNT)
+__XFS_HAS_FEAT(bigtime, BIGTIME)
+__XFS_HAS_FEAT(needsrepair, NEEDSREPAIR)
/*
* Flags for m_flags.
/* Precalc some constants */
qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
- if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
+ if (xfs_has_bigtime(mp)) {
qinf->qi_expiry_min =
xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
qinf->qi_expiry_max =
* with PQUOTA, just use sb_gquotino for sb_pquotino and
* vice-versa.
*/
- if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
+ if (!xfs_has_pquotino(mp) &&
(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
xfs_ino_t ino = NULLFSINO;
*/
spin_lock(&mp->m_sb_lock);
if (flags & XFS_QMOPT_SBVERSION) {
- ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
+ ASSERT(!xfs_has_quota(mp));
- xfs_sb_version_addquota(&mp->m_sb);
+ xfs_add_quota(mp);
mp->m_sb.sb_uquotino = NULLFSINO;
mp->m_sb.sb_gquotino = NULLFSINO;
mp->m_sb.sb_pquotino = NULLFSINO;
ddq->d_bwarns = 0;
ddq->d_iwarns = 0;
ddq->d_rtbwarns = 0;
- if (xfs_sb_version_hasbigtime(&mp->m_sb))
+ if (xfs_has_bigtime(mp))
ddq->d_type |= XFS_DQTYPE_BIGTIME;
}
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
xfs_update_cksum((char *)&dqb[j],
sizeof(struct xfs_dqblk),
XFS_DQUOT_CRC_OFF);
/*
* Get the uquota and gquota inodes
*/
- if (xfs_sb_version_hasquota(&mp->m_sb)) {
+ if (xfs_has_quota(mp)) {
if (XFS_IS_UQUOTA_ON(mp) &&
mp->m_sb.sb_uquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_uquotino > 0);
uint quotaondisk;
uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0;
- quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) &&
+ quotaondisk = xfs_has_quota(mp) &&
(mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT);
if (quotaondisk) {
{
int error = -EINVAL;
- if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
+ if (!xfs_has_quota(mp) || flags == 0 ||
(flags & ~XFS_QMOPT_QUOTALL)) {
xfs_debug(mp, "%s: flags=%x m_qflags=%x",
__func__, flags, mp->m_qflags);
struct xfs_mount *mp,
struct xfs_phys_extent *refc)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return false;
if (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
xfs_agnumber_t agno;
int error = 0;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return 0;
for_each_perag(mp, agno, pag) {
struct xfs_perag *pag;
int error = 0;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return 0;
pag = xfs_perag_get(mp, agno);
struct xfs_mount *mp,
struct xfs_map_extent *rmap)
{
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return false;
if (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
return -EINVAL;
/* Unsupported realtime features. */
- if (xfs_sb_version_hasrmapbt(&mp->m_sb) ||
- xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp))
return -EOPNOTSUPP;
nrblocks = in->newblocks;
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
unsigned int log_sector_size = BBSIZE;
- if (xfs_sb_version_hassector(&mp->m_sb))
+ if (xfs_has_sector(mp))
log_sector_size = mp->m_sb.sb_logsectsize;
error = xfs_setsize_buftarg(mp->m_logdev_targp,
log_sector_size);
int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
/* Fail a mount where the logbuf is smaller than the log stripe */
- if (xfs_sb_version_haslogv2(&mp->m_sb)) {
+ if (xfs_has_logv2(mp)) {
if (mp->m_logbsize <= 0 &&
mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
mp->m_logbsize = mp->m_sb.sb_logsunit;
/*
* V5 filesystems always use attr2 format for attributes.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
(mp->m_flags & XFS_MOUNT_NOATTR2)) {
xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
"attr2 is always enabled for V5 filesystems.");
if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
(mp->m_qflags & XFS_PQUOTA_ACCT) &&
- !xfs_sb_version_has_pquotino(&mp->m_sb)) {
+ !xfs_has_pquotino(mp)) {
xfs_warn(mp,
"Super block does not support project and group quota together");
return -EINVAL;
goto out_free_sb;
/* V4 support is undergoing deprecation. */
- if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
xfs_warn_once(mp,
"Deprecated V4 format (crc=0) will not be supported after September 2030.");
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_max_links = XFS_MAXLINK;
sb->s_time_gran = 1;
- if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
+ if (xfs_has_bigtime(mp)) {
sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
} else {
"DAX unsupported by block device. Turning off DAX.");
xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
}
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (xfs_has_reflink(mp)) {
xfs_alert(mp,
"DAX and reflink cannot be used together!");
error = -EINVAL;
}
}
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (xfs_has_reflink(mp)) {
if (mp->m_sb.sb_rblocks) {
xfs_alert(mp,
"reflink not compatible with realtime device!");
}
}
- if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
+ if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
xfs_alert(mp,
"reverse mapping btree not compatible with realtime device!");
error = -EINVAL;
byte_cnt = pathlen;
cur_chunk = bp->b_addr;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (!xfs_symlink_hdr_ok(ip->i_ino, offset,
byte_cnt, bp)) {
error = -EFSCORRUPTED;
WARN_ON(resp->tr_logres > 0 &&
mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
- xfs_sb_version_haslazysbcount(&mp->m_sb));
+ xfs_has_lazysbcount(mp));
tp->t_magic = XFS_TRANS_HEADER_MAGIC;
tp->t_flags = flags;
switch (field) {
case XFS_TRANS_SB_ICOUNT:
tp->t_icount_delta += delta;
- if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+ if (xfs_has_lazysbcount(mp))
flags &= ~XFS_TRANS_SB_DIRTY;
break;
case XFS_TRANS_SB_IFREE:
tp->t_ifree_delta += delta;
- if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+ if (xfs_has_lazysbcount(mp))
flags &= ~XFS_TRANS_SB_DIRTY;
break;
case XFS_TRANS_SB_FDBLOCKS:
delta -= blkres_delta;
}
tp->t_fdblocks_delta += delta;
- if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+ if (xfs_has_lazysbcount(mp))
flags &= ~XFS_TRANS_SB_DIRTY;
break;
case XFS_TRANS_SB_RES_FDBLOCKS:
* be applied to the on-disk superblock.
*/
tp->t_res_fdblocks_delta += delta;
- if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+ if (xfs_has_lazysbcount(mp))
flags &= ~XFS_TRANS_SB_DIRTY;
break;
case XFS_TRANS_SB_FREXTENTS:
/*
* Only update the superblock counters if we are logging them
*/
- if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
+ if (!xfs_has_lazysbcount((tp->t_mountp))) {
if (tp->t_icount_delta)
be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
if (tp->t_ifree_delta)
if (tp->t_blk_res > 0)
blkdelta = tp->t_blk_res;
if ((tp->t_fdblocks_delta != 0) &&
- (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+ (xfs_has_lazysbcount(mp) ||
(tp->t_flags & XFS_TRANS_SB_DIRTY)))
blkdelta += tp->t_fdblocks_delta;
(tp->t_flags & XFS_TRANS_SB_DIRTY))
rtxdelta += tp->t_frextents_delta;
- if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+ if (xfs_has_lazysbcount(mp) ||
(tp->t_flags & XFS_TRANS_SB_DIRTY)) {
idelta = tp->t_icount_delta;
ifreedelta = tp->t_ifree_delta;
/* Upgrade the dquot to bigtime format if possible. */
if (dqp->q_id != 0 &&
- xfs_sb_version_hasbigtime(&tp->t_mountp->m_sb) &&
+ xfs_has_bigtime(tp->t_mountp) &&
!(dqp->q_type & XFS_DQTYPE_BIGTIME))
dqp->q_type |= XFS_DQTYPE_BIGTIME;