memcpy(node_blk->i.i_name, de->name, de->len);
node_blk->i.i_name[de->len] = 0;
- if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
+ if (c.feature & F2FS_FEATURE_EXTRA_ATTR) {
node_blk->i.i_inline |= F2FS_EXTRA_ATTR;
node_blk->i.i_extra_isize = cpu_to_le16(calc_extra_isize());
}
de->link = NULL;
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
+ if (c.feature & F2FS_FEATURE_INODE_CHKSUM)
node_blk->i.i_inode_checksum =
cpu_to_le32(f2fs_inode_chksum(node_blk));
}
int need_fix = 0, ret = 0;
int type;
- if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
+ if (get_sb(feature) & F2FS_FEATURE_RO)
return 0;
segno = GET_SEGNO(sbi, blk_addr);
int need_fix = 0, ret = 0;
int type;
- if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
+ if (get_sb(feature) & F2FS_FEATURE_RO)
return 0;
segno = GET_SEGNO(sbi, blk_addr);
u32 i_links = le32_to_cpu(node_blk->i.i_links);
u64 i_size = le64_to_cpu(node_blk->i.i_size);
u64 i_blocks = le64_to_cpu(node_blk->i.i_blocks);
- bool compr_supported = c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION);
+ bool compr_supported = c.feature & F2FS_FEATURE_COMPRESSION;
u32 i_flags = le32_to_cpu(node_blk->i.i_flags);
bool compressed = i_flags & F2FS_COMPR_FL;
bool compr_rel = node_blk->i.i_inline & F2FS_COMPRESS_RELEASED;
child.last_blk = 0;
if (f2fs_has_extra_isize(&node_blk->i)) {
- if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
+ if (c.feature & F2FS_FEATURE_EXTRA_ATTR) {
unsigned int isize =
le16_to_cpu(node_blk->i.i_extra_isize);
if (isize > 4 * DEF_ADDRS_PER_INODE) {
}
}
- if ((c.feature &
- cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) &&
+ if ((c.feature & F2FS_FEATURE_FLEXIBLE_INLINE_XATTR) &&
(node_blk->i.i_inline & F2FS_INLINE_XATTR)) {
unsigned int inline_size =
le16_to_cpu(node_blk->i.i_inline_xattr_size);
if ((node_blk->i.i_flags & cpu_to_le32(F2FS_CASEFOLD_FL)) &&
(ftype != F2FS_FT_DIR ||
- !(c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)))) {
+ !(c.feature & F2FS_FEATURE_CASEFOLD))) {
ASSERT_MSG("[0x%x] unexpected casefold flag", nid);
if (c.fix_on) {
FIX_MSG("ino[0x%x] clear casefold flag", nid);
if (need_fix && f2fs_dev_is_writable())
node_blk->i.i_ext.len = 0;
- if ((c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) &&
+ if ((c.feature & F2FS_FEATURE_INODE_CHKSUM) &&
f2fs_has_extra_isize(&node_blk->i)) {
__u32 provided, calculated;
struct seg_entry *se;
int j, nblocks;
- if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO) &&
+ if ((get_sb(feature) & F2FS_FEATURE_RO) &&
type != CURSEG_HOT_DATA && type != CURSEG_HOT_NODE)
return 0;
se = get_seg_entry(sbi, curseg->segno);
sum_blk = curseg->sum_blk;
- if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
+ if ((get_sb(feature) & F2FS_FEATURE_RO) &&
(i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE))
continue;
force = 1;
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
+ if (c.feature & F2FS_FEATURE_LOST_FOUND) {
for (i = 0; i < fsck->nr_nat_entries; i++)
if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0)
break;
MSG(0, "\tError: Unknown flag %s\n", token);
fsck_usage();
}
- c.feature |= cpu_to_le32(F2FS_FEATURE_CASEFOLD);
+ c.feature |= F2FS_FEATURE_CASEFOLD;
break;
case 'V':
show_version(prog);
cbc.cnt = 0;
cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
- if (c.feature & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
+ if (c.feature & F2FS_FEATURE_QUOTA_INO) {
ret = quota_init_context(sbi);
if (ret) {
ASSERT_MSG("quota_init_context failure: %d", ret);
{
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
- if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) {
+ if (get_sb(feature) & F2FS_FEATURE_RO) {
MSG(0, "Not support on readonly image.\n");
return -1;
}
le32_to_cpu(inode->i_ext.blk_addr),
le32_to_cpu(inode->i_ext.len));
- if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
+ if (c.feature & F2FS_FEATURE_EXTRA_ATTR) {
DISP_u16(inode, i_extra_isize);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
+ if (c.feature & F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)
DISP_u16(inode, i_inline_xattr_size);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
+ if (c.feature & F2FS_FEATURE_PRJQUOTA)
DISP_u32(inode, i_projid);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
+ if (c.feature & F2FS_FEATURE_INODE_CHKSUM)
DISP_u32(inode, i_inode_checksum);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
+ if (c.feature & F2FS_FEATURE_INODE_CRTIME) {
DISP_u64(inode, i_crtime);
DISP_u32(inode, i_crtime_nsec);
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+ if (c.feature & F2FS_FEATURE_COMPRESSION) {
DISP_u64(inode, i_compr_blocks);
DISP_u8(inode, i_compress_algorithm);
DISP_u8(inode, i_log_cluster_size);
void print_sb_state(struct f2fs_super_block *sb)
{
- __le32 f = sb->feature;
+ unsigned int f = get_sb(feature);
int i;
MSG(0, "Info: superblock features = %x : ", f);
- if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
+ if (f & F2FS_FEATURE_ENCRYPT) {
MSG(0, "%s", " encrypt");
}
- if (f & cpu_to_le32(F2FS_FEATURE_VERITY)) {
+ if (f & F2FS_FEATURE_VERITY) {
MSG(0, "%s", " verity");
}
- if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
+ if (f & F2FS_FEATURE_BLKZONED) {
MSG(0, "%s", " blkzoned");
}
- if (f & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
+ if (f & F2FS_FEATURE_EXTRA_ATTR) {
MSG(0, "%s", " extra_attr");
}
- if (f & cpu_to_le32(F2FS_FEATURE_PRJQUOTA)) {
+ if (f & F2FS_FEATURE_PRJQUOTA) {
MSG(0, "%s", " project_quota");
}
- if (f & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) {
+ if (f & F2FS_FEATURE_INODE_CHKSUM) {
MSG(0, "%s", " inode_checksum");
}
- if (f & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) {
+ if (f & F2FS_FEATURE_FLEXIBLE_INLINE_XATTR) {
MSG(0, "%s", " flexible_inline_xattr");
}
- if (f & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
+ if (f & F2FS_FEATURE_QUOTA_INO) {
MSG(0, "%s", " quota_ino");
}
- if (f & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
+ if (f & F2FS_FEATURE_INODE_CRTIME) {
MSG(0, "%s", " inode_crtime");
}
- if (f & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
+ if (f & F2FS_FEATURE_LOST_FOUND) {
MSG(0, "%s", " lost_found");
}
- if (f & cpu_to_le32(F2FS_FEATURE_SB_CHKSUM)) {
+ if (f & F2FS_FEATURE_SB_CHKSUM) {
MSG(0, "%s", " sb_checksum");
}
- if (f & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
+ if (f & F2FS_FEATURE_CASEFOLD) {
MSG(0, "%s", " casefold");
}
- if (f & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+ if (f & F2FS_FEATURE_COMPRESSION) {
MSG(0, "%s", " compression");
}
- if (f & cpu_to_le32(F2FS_FEATURE_RO)) {
+ if (f & F2FS_FEATURE_RO) {
MSG(0, "%s", " ro");
}
MSG(0, "\n");
return -1;
}
- if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
+ if (!(get_sb(feature) & F2FS_FEATURE_RO) &&
(total_sections > segment_count ||
total_sections < F2FS_MIN_SEGMENTS ||
segs_per_sec > segment_count || !segs_per_sec)) {
/* Check zoned block device feature */
if (c.devices[0].zoned_model != F2FS_ZONED_NONE &&
- !(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
+ !(get_sb(feature) & F2FS_FEATURE_BLKZONED)) {
MSG(0, "\tMissing zoned block device feature\n");
return -1;
}
ovp_segments = get_cp(overprov_segment_count);
reserved_segments = get_cp(rsvd_segment_count);
- if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
+ if (!(get_sb(feature) & F2FS_FEATURE_RO) &&
(fsmeta < F2FS_MIN_SEGMENT || ovp_segments == 0 ||
reserved_segments == 0)) {
MSG(0, "\tWrong layout: check mkfs.f2fs version\n");
user_block_count = get_cp(user_block_count);
segment_count_main = get_sb(segment_count_main) +
- (cpu_to_le32(F2FS_FEATURE_RO) ? 1 : 0);
+ ((get_sb(feature) & F2FS_FEATURE_RO) ? 1 : 0);
log_blocks_per_seg = get_sb(log_blocks_per_seg);
if (!user_block_count || user_block_count >=
segment_count_main << log_blocks_per_seg) {
NR_CURSEG_TYPE) {
MSG(0, "\tWrong cp_pack_start_sum(%u) or cp_payload(%u)\n",
cp_pack_start_sum, cp_payload);
- if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM))
+ if (get_sb(feature) & F2FS_FEATURE_SB_CHKSUM)
return 1;
set_sb(cp_payload, cp_pack_start_sum - 1);
update_superblock(sb, SB_MASK_ALL);
int type, ret;
struct seg_entry *se;
- if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
+ if (get_sb(feature) & F2FS_FEATURE_RO)
return;
segno = GET_SEGNO(sbi, blk_addr);
START_BLOCK(sbi, segno + 1);
continue;
}
- if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
+ if (!(get_sb(feature) & F2FS_FEATURE_RO) &&
IS_CUR_SEGNO(sbi, segno))
goto next_segment;
if (vblocks == 0 && not_enough)
u64 ssa_blk, to;
int ret;
- if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))) {
+ if (get_sb(feature) & F2FS_FEATURE_RO) {
if (i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
return;
ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
ASSERT(ret >= 0);
- if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))) {
+ if (!(get_sb(feature) & F2FS_FEATURE_RO)) {
/* update original SSA too */
ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
ret = dev_write_block(curseg->sum_blk, ssa_blk);
int sb_changed = 0;
struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
- if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) &&
- c.feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
- sb->feature |= cpu_to_le32(F2FS_FEATURE_ENCRYPT);
+ if (!(get_sb(feature) & F2FS_FEATURE_ENCRYPT) &&
+ c.feature & F2FS_FEATURE_ENCRYPT) {
+ sb->feature = cpu_to_le32(get_sb(feature) |
+ F2FS_FEATURE_ENCRYPT);
MSG(0, "Info: Set Encryption feature\n");
sb_changed = 1;
}
- if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) &&
- c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
+ if (!(get_sb(feature) & F2FS_FEATURE_CASEFOLD) &&
+ c.feature & F2FS_FEATURE_CASEFOLD) {
if (!c.s_encoding) {
ERR_MSG("ERROR: Must specify encoding to enable casefolding.\n");
return -1;
}
- sb->feature |= cpu_to_le32(F2FS_FEATURE_CASEFOLD);
+ sb->feature = cpu_to_le32(get_sb(feature) |
+ F2FS_FEATURE_CASEFOLD);
MSG(0, "Info: Set Casefold feature\n");
sb_changed = 1;
}
/* TODO: quota needs to allocate inode numbers */
- c.feature = sb->feature;
+ c.feature = get_sb(feature);
if (!sb_changed)
return 0;
return -1;
/* precompute checksum seed for metadata */
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
+ if (c.feature & F2FS_FEATURE_INODE_CHKSUM)
c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
sbi->total_valid_node_count = get_cp(valid_node_count);
type = CURSEG_WARM_NODE;
}
- if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
+ if ((get_sb(feature) & F2FS_FEATURE_RO) &&
type != CURSEG_HOT_NODE)
type = CURSEG_HOT_NODE;
blkaddr = SM_I(sbi)->main_blkaddr;
- if (sbi->raw_super->feature & cpu_to_le32(F2FS_FEATURE_RO)) {
+ if (le32_to_cpu(sbi->raw_super->feature) & F2FS_FEATURE_RO) {
if (IS_NODESEG(type)) {
type = CURSEG_HOT_NODE;
blkaddr = __end_block_addr(sbi);
unsigned int blkaddr = datablock_addr(dn->node_blk, dn->ofs_in_node);
int ret;
- if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
+ if ((get_sb(feature) & F2FS_FEATURE_RO) &&
type != CURSEG_HOT_DATA)
type = CURSEG_HOT_DATA;
node_blk->i.i_inline |= F2FS_INLINE_DATA;
node_blk->i.i_inline |= F2FS_DATA_EXIST;
- if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
+ if (c.feature & F2FS_FEATURE_EXTRA_ATTR) {
node_blk->i.i_inline |= F2FS_EXTRA_ATTR;
node_blk->i.i_extra_isize =
cpu_to_le16(calc_extra_isize());
if (n < 0)
return -1;
- if (!c.compress.enabled || (c.feature & cpu_to_le32(F2FS_FEATURE_RO)))
+ if (!c.compress.enabled || (c.feature & F2FS_FEATURE_RO))
update_largest_extent(sbi, de->ino);
update_free_segments(sbi);
int preserve_limits; /* preserve quota limits */
int large_nat_bitmap;
int fix_chksum; /* fix old cp.chksum position */
- __le32 feature; /* defined features */
+ unsigned int feature; /* defined features */
unsigned int quota_bits; /* quota bits */
time_t fixed_time;
extern struct f2fs_configuration c;
static inline int get_inline_xattr_addrs(struct f2fs_inode *inode)
{
- if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
+ if (c.feature & F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)
return le16_to_cpu(inode->i_inline_xattr_size);
else if (inode->i_inline & F2FS_INLINE_XATTR ||
inode->i_inline & F2FS_INLINE_DENTRY)
raw_node->i.i_size = cpu_to_le64(1 << get_sb(log_blocksize));
raw_node->i.i_blocks = cpu_to_le64(2);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
+ if (c.feature & F2FS_FEATURE_EXTRA_ATTR) {
raw_node->i.i_inline = F2FS_EXTRA_ATTR;
raw_node->i.i_extra_isize = cpu_to_le16(calc_extra_isize());
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
+ if (c.feature & F2FS_FEATURE_PRJQUOTA)
raw_node->i.i_projid = cpu_to_le32(F2FS_DEF_PROJID);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
+ if (c.feature & F2FS_FEATURE_INODE_CRTIME) {
raw_node->i.i_crtime = cpu_to_le32(mtime);
raw_node->i.i_crtime_nsec = 0;
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+ if (c.feature & F2FS_FEATURE_COMPRESSION) {
raw_node->i.i_compr_blocks = 0;
raw_node->i.i_compress_algorithm = 0;
raw_node->i.i_log_cluster_size = 0;
{
u32 mask = feature_map(table, features);
if (mask) {
- c.feature |= cpu_to_le32(mask);
+ c.feature |= mask;
} else {
MSG(0, "Error: Wrong features %s\n", features);
return -1;
int write_inode(struct f2fs_node *inode, u64 blkaddr)
{
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
+ if (c.feature & F2FS_FEATURE_INODE_CHKSUM)
inode->i.i_inode_checksum =
cpu_to_le32(f2fs_inode_chksum(inode));
return dev_write_block(inode, blkaddr);
{
unsigned int size = offsetof(struct f2fs_inode, i_projid);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
+ if (c.feature & F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)
size = offsetof(struct f2fs_inode, i_projid);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
+ if (c.feature & F2FS_FEATURE_PRJQUOTA)
size = offsetof(struct f2fs_inode, i_inode_checksum);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
+ if (c.feature & F2FS_FEATURE_INODE_CHKSUM)
size = offsetof(struct f2fs_inode, i_crtime);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME))
+ if (c.feature & F2FS_FEATURE_INODE_CRTIME)
size = offsetof(struct f2fs_inode, i_compr_blocks);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION))
+ if (c.feature & F2FS_FEATURE_COMPRESSION)
size = offsetof(struct f2fs_inode, i_extra_end);
return size - F2FS_EXTRA_ISIZE_OFFSET;
zone_size_bytes * zone_size_bytes -
(uint64_t) c.start_sector * DEFAULT_SECTOR_SIZE;
- if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
+ if (c.feature & F2FS_FEATURE_RO)
zone_align_start_offset = 8192;
if (c.start_sector % DEFAULT_SECTORS_PER_BLOCK) {
get_sb(segment_count_nat))) *
c.blks_per_seg;
- if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
+ if (c.feature & F2FS_FEATURE_RO)
blocks_for_ssa = 0;
else
blocks_for_ssa = total_valid_blks_available /
c.reserved_segments = get_reserved(sb, c.overprovision);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
+ if (c.feature & F2FS_FEATURE_RO) {
c.overprovision = 0;
c.reserved_segments = 0;
}
- if ((!(c.feature & cpu_to_le32(F2FS_FEATURE_RO)) &&
+ if ((!(c.feature & F2FS_FEATURE_RO) &&
c.overprovision == 0) ||
c.total_segments < F2FS_MIN_SEGMENTS ||
(c.devices[0].total_sectors *
}
/* precompute checksum seed for metadata */
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
+ if (c.feature & F2FS_FEATURE_INODE_CHKSUM)
c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
utf8_to_utf16((char *)sb->volume_name, (const char *)c.vol_label,
qtype, c.next_free_nid - 1);
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND))
+ if (c.feature & F2FS_FEATURE_LOST_FOUND)
c.lpf_ino = c.next_free_nid++;
- if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
+ if (c.feature & F2FS_FEATURE_RO)
avail_zones = 2;
else
avail_zones = 6;
return -1;
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
+ if (c.feature & F2FS_FEATURE_RO) {
c.cur_seg[CURSEG_HOT_NODE] = last_section(last_zone(total_zones));
c.cur_seg[CURSEG_WARM_NODE] = 0;
c.cur_seg[CURSEG_COLD_NODE] = 0;
}
/* if there is redundancy, reassign it */
- if (!(c.feature & cpu_to_le32(F2FS_FEATURE_RO)))
+ if (!(c.feature & F2FS_FEATURE_RO))
verify_cur_segs();
cure_extension_list();
memcpy(sb->version, c.version, VERSION_LEN);
memcpy(sb->init_version, c.version, VERSION_LEN);
- if (c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
+ if (c.feature & F2FS_FEATURE_CASEFOLD) {
set_sb(s_encoding, c.s_encoding);
set_sb(s_encoding_flags, c.s_encoding_flags);
}
- sb->feature = c.feature;
+ sb->feature = cpu_to_le32(c.feature);
- if (get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) {
+ if (c.feature & F2FS_FEATURE_SB_CHKSUM) {
set_sb(checksum_offset, SB_CHKSUM_OFFSET);
set_sb(crc, f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
SB_CHKSUM_OFFSET));
c.reserved_segments);
/* main segments - reserved segments - (node + data segments) */
- if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
+ if (c.feature & F2FS_FEATURE_RO) {
set_cp(free_segment_count, f2fs_get_usable_segments(sb) - 2);
set_cp(user_block_count, ((get_cp(free_segment_count) + 2 -
get_cp(overprov_segment_count)) * c.blks_per_seg));
/* sit_journal */
journal = &c.sit_jnl;
- if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
+ if (c.feature & F2FS_FEATURE_RO) {
i = CURSEG_RO_HOT_DATA;
vblocks = le16_to_cpu(journal->sit_j.entries[i].se.vblocks);
journal->sit_j.entries[i].segno = cp->cur_data_segno[0];
uint64_t start_inode_pos = get_sb(main_blkaddr);
uint64_t last_inode_pos;
- if (c.zoned_mode || c.feature & cpu_to_le32(F2FS_FEATURE_RO))
+ if (c.zoned_mode || c.feature & F2FS_FEATURE_RO)
return 0;
raw_node = calloc(sizeof(struct f2fs_node), 1);
unsigned short vblocks;
int idx = curseg_type;
- if (c.feature & cpu_to_le32(F2FS_FEATURE_RO)) {
+ if (c.feature & F2FS_FEATURE_RO) {
if (curseg_type < NR_CURSEG_DATA_TYPE)
idx = CURSEG_RO_HOT_DATA;
else
}
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
+ if (c.feature & F2FS_FEATURE_LOST_FOUND) {
err = f2fs_write_lpf_inode();
if (err < 0) {
MSG(1, "\tError: Failed to write lost+found inode!!!\n");
if (c.defset == CONF_ANDROID)
MSG(0, "Info: Set conf for android\n");
- if (c.feature & le32_to_cpu(F2FS_FEATURE_CASEFOLD))
+ if (c.feature & F2FS_FEATURE_CASEFOLD)
MSG(0, "Info: Enable %s with casefolding\n",
f2fs_encoding2str(c.s_encoding));
- if (c.feature & le32_to_cpu(F2FS_FEATURE_PRJQUOTA))
+ if (c.feature & F2FS_FEATURE_PRJQUOTA)
MSG(0, "Info: Enable Project quota\n");
- if (c.feature & le32_to_cpu(F2FS_FEATURE_COMPRESSION))
+ if (c.feature & F2FS_FEATURE_COMPRESSION)
MSG(0, "Info: Enable Compression\n");
}
c.root_uid = c.root_gid = 0;
/* RO doesn't need any other features */
- if (c.feature & cpu_to_le32(F2FS_FEATURE_RO))
+ if (c.feature & F2FS_FEATURE_RO)
return;
/* -O encrypt -O project_quota,extra_attr,{quota} -O verity */
- c.feature |= cpu_to_le32(F2FS_FEATURE_ENCRYPT);
+ c.feature |= F2FS_FEATURE_ENCRYPT;
if (!kernel_version_over(4, 14))
- c.feature |= cpu_to_le32(F2FS_FEATURE_QUOTA_INO);
- c.feature |= cpu_to_le32(F2FS_FEATURE_PRJQUOTA);
- c.feature |= cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR);
- c.feature |= cpu_to_le32(F2FS_FEATURE_VERITY);
+ c.feature |= F2FS_FEATURE_QUOTA_INO;
+ c.feature |= F2FS_FEATURE_PRJQUOTA;
+ c.feature |= F2FS_FEATURE_EXTRA_ATTR;
+ c.feature |= F2FS_FEATURE_VERITY;
break;
}
#ifdef CONF_CASEFOLD
c.s_encoding = F2FS_ENC_UTF8_12_1;
- c.feature |= cpu_to_le32(F2FS_FEATURE_CASEFOLD);
+ c.feature |= F2FS_FEATURE_CASEFOLD;
#endif
#ifdef CONF_PROJID
- c.feature |= cpu_to_le32(F2FS_FEATURE_QUOTA_INO);
- c.feature |= cpu_to_le32(F2FS_FEATURE_PRJQUOTA);
- c.feature |= cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR);
+ c.feature |= F2FS_FEATURE_QUOTA_INO;
+ c.feature |= F2FS_FEATURE_PRJQUOTA;
+ c.feature |= F2FS_FEATURE_EXTRA_ATTR;
#endif
- if (c.feature & cpu_to_le32(F2FS_FEATURE_QUOTA_INO))
+ if (c.feature & F2FS_FEATURE_QUOTA_INO)
c.quota_bits = QUOTA_USR_BIT | QUOTA_GRP_BIT;
- if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA)) {
- c.feature |= cpu_to_le32(F2FS_FEATURE_QUOTA_INO);
+ if (c.feature & F2FS_FEATURE_PRJQUOTA) {
+ c.feature |= F2FS_FEATURE_QUOTA_INO;
c.quota_bits |= QUOTA_PRJ_BIT;
}
}
MSG(0, "\tError: Unknown flag %s\n",token);
mkfs_usage();
}
- c.feature |= cpu_to_le32(F2FS_FEATURE_CASEFOLD);
+ c.feature |= F2FS_FEATURE_CASEFOLD;
break;
case 'Z':
c.conf_reserved_sections = atoi(optarg);
add_default_options();
- if (!(c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR))) {
- if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA)) {
+ if (!(c.feature & F2FS_FEATURE_EXTRA_ATTR)) {
+ if (c.feature & F2FS_FEATURE_PRJQUOTA) {
MSG(0, "\tInfo: project quota feature should always be "
"enabled with extra attr feature\n");
exit(1);
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) {
+ if (c.feature & F2FS_FEATURE_INODE_CHKSUM) {
MSG(0, "\tInfo: inode checksum feature should always be "
"enabled with extra attr feature\n");
exit(1);
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) {
+ if (c.feature & F2FS_FEATURE_FLEXIBLE_INLINE_XATTR) {
MSG(0, "\tInfo: flexible inline xattr feature should always be "
"enabled with extra attr feature\n");
exit(1);
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
+ if (c.feature & F2FS_FEATURE_INODE_CRTIME) {
MSG(0, "\tInfo: inode crtime feature should always be "
"enabled with extra attr feature\n");
exit(1);
}
- if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+ if (c.feature & F2FS_FEATURE_COMPRESSION) {
MSG(0, "\tInfo: compression feature should always be "
"enabled with extra attr feature\n");
exit(1);
c.trim = 0;
if (c.zoned_mode)
- c.feature |= cpu_to_le32(F2FS_FEATURE_BLKZONED);
+ c.feature |= F2FS_FEATURE_BLKZONED;
}
#ifdef HAVE_LIBBLKID