struct node_info ni;
struct f2fs_node *node_blk;
u32 skip = 0;
- u32 i, idx;
+ u32 i, idx = 0;
+
+ get_node_info(sbi, nid, &ni);
+
+ node_blk = calloc(BLOCK_SZ, 1);
+ ASSERT(node_blk);
+
+ /* check the read like the rest of this patch does (see ADDRS_PER_PAGE) */
+ ASSERT(dev_read_block(node_blk, ni.blk_addr) >= 0);
switch (ntype) {
case TYPE_DIRECT_NODE:
- skip = idx = ADDRS_PER_BLOCK;
+ skip = idx = ADDRS_PER_BLOCK(&node_blk->i);
break;
case TYPE_INDIRECT_NODE:
idx = NIDS_PER_BLOCK;
- skip = idx * ADDRS_PER_BLOCK;
+ skip = idx * ADDRS_PER_BLOCK(&node_blk->i);
break;
case TYPE_DOUBLE_INDIRECT_NODE:
skip = 0;
+ /*
+  * node_blk is now allocated before this switch; the early return
+  * must release it or every double-indirect node leaks one block.
+  */
+ free(node_blk);
return;
}
- get_node_info(sbi, nid, &ni);
-
- node_blk = calloc(BLOCK_SZ, 1);
- ASSERT(node_blk);
-
- dev_read_block(node_blk, ni.blk_addr);
-
for (i = 0; i < idx; i++, (*ofs)++) {
switch (ntype) {
case TYPE_DIRECT_NODE:
int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
bidx = node_ofs - 5 - dec;
}
- return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(&node_blk->i);
+ return bidx * ADDRS_PER_BLOCK(&node_blk->i) +
+ ADDRS_PER_INODE(&node_blk->i);
}
static void dump_data_offset(u32 blk_addr, int ofs_in_node)
/* check extent info */
check_extent_info(&child, blkaddr, 0);
+ if (blkaddr == COMPRESS_ADDR) {
+ fsck->chk.valid_blk_cnt++;
+ *blk_cnt = *blk_cnt + 1;
+ continue;
+ }
+
if (blkaddr != 0) {
ret = fsck_chk_data_blk(sbi,
IS_CASEFOLDED(&node_blk->i),
}
skip:
if (ntype == TYPE_DIRECT_NODE)
- child.pgofs += ADDRS_PER_BLOCK;
+ child.pgofs += ADDRS_PER_BLOCK(&node_blk->i);
else if (ntype == TYPE_INDIRECT_NODE)
- child.pgofs += ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+ child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
+ NIDS_PER_BLOCK;
else
- child.pgofs += ADDRS_PER_BLOCK *
+ child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
NIDS_PER_BLOCK * NIDS_PER_BLOCK;
}
child->p_ino = nid;
child->pp_ino = le32_to_cpu(inode->i_pino);
- for (idx = 0; idx < ADDRS_PER_BLOCK; idx++, child->pgofs++) {
+ for (idx = 0; idx < ADDRS_PER_BLOCK(inode); idx++, child->pgofs++) {
block_t blkaddr = le32_to_cpu(node_blk->dn.addr[idx]);
check_extent_info(child, blkaddr, 0);
if (blkaddr == 0x0)
continue;
+ if (blkaddr == COMPRESS_ADDR) {
+ F2FS_FSCK(sbi)->chk.valid_blk_cnt++;
+ *blk_cnt = *blk_cnt + 1;
+ continue;
+ }
ret = fsck_chk_data_blk(sbi, IS_CASEFOLDED(inode),
blkaddr, child,
le64_to_cpu(inode->i_blocks) == *blk_cnt, ftype,
FIX_MSG("Set indirect node 0x%x -> 0", i);
}
skip:
- child->pgofs += ADDRS_PER_BLOCK;
+ child->pgofs += ADDRS_PER_BLOCK(&node_blk->i);
}
}
FIX_MSG("Set double indirect node 0x%x -> 0", i);
}
skip:
- child->pgofs += ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+ child->pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
+ NIDS_PER_BLOCK;
}
}
fsck->chk.valid_blk_cnt--;
f2fs_clear_main_bitmap(sbi, ni.blk_addr);
- for (i = 0; i < ADDRS_PER_BLOCK; i++) {
+ for (i = 0; i < ADDRS_PER_BLOCK(&node->i); i++) {
addr = le32_to_cpu(node->dn.addr[i]);
if (!addr)
continue;
DISP_u64(inode, i_crtime);
DISP_u32(inode, i_crtime_nsec);
}
+ if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+ DISP_u64(inode, i_compr_blocks);
+ DISP_u32(inode, i_compress_algrithm);
+ DISP_u32(inode, i_log_cluster_size);
+ DISP_u32(inode, i_padding);
+ }
}
DISP_u32(inode, i_addr[ofs]); /* Pointers to data blocks */
DBG(verbose,
"Node ID [0x%x:%u] is direct node or indirect node.\n",
nid, nid);
- for (i = 0; i <= 10; i++)
+ for (i = 0; i < DEF_ADDRS_PER_BLOCK; i++)
MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
i, dump_blk[i], dump_blk[i]);
}
if (f & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
MSG(0, "%s", " casefold");
}
+ if (f & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+ MSG(0, "%s", " compression");
+ }
MSG(0, "\n");
MSG(0, "Info: superblock encrypt level = %d, salt = ",
sb->encryption_level);
static inline bool is_valid_data_blkaddr(block_t blkaddr)
{
+	/*
+	 * COMPRESS_ADDR (-2U) is a sentinel used for compressed clusters
+	 * (see its definition next to NEW_ADDR/NULL_ADDR); like those two
+	 * it does not point at a real on-disk data block.
+	 */
- if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
+ blkaddr == COMPRESS_ADDR)
return 0;
return 1;
}
/* step 3: recover data indices */
start = start_bidx_of_node(ofs_of_node(node_blk), node_blk);
- end = start + ADDRS_PER_PAGE(node_blk);
+ end = start + ADDRS_PER_PAGE(sbi, node_blk, NULL);
for (; start < end; start++, ofs_in_node++) {
blkaddr = datablock_addr(node_blk, ofs_in_node);
int offset[4], unsigned int noffset[4])
{
const long direct_index = ADDRS_PER_INODE(&node->i);
- const long direct_blks = ADDRS_PER_BLOCK;
+ const long direct_blks = ADDRS_PER_BLOCK(&node->i);
const long dptrs_per_blk = NIDS_PER_BLOCK;
- const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+ const long indirect_blks = ADDRS_PER_BLOCK(&node->i) * NIDS_PER_BLOCK;
const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
int n = 0;
int level = 0;
#include "fsck.h"
-#define ADDRS_PER_PAGE(page) \
- (IS_INODE(page) ? ADDRS_PER_INODE(&page->i) : ADDRS_PER_BLOCK)
-
static inline int IS_INODE(struct f2fs_node *node)
{
return ((node)->footer.nid == (node)->footer.ino);
}
+/*
+ * Return the number of block-address slots carried by @node_blk.
+ *
+ * For an inode block this is ADDRS_PER_INODE(); for a direct node it is
+ * the owning inode's ADDRS_PER_BLOCK() (cluster-aligned for compressed
+ * inodes), so the inode must be consulted.  If the caller already holds
+ * the inode block it passes it via @inode_blk; otherwise it is read from
+ * disk here and released before returning.
+ */
+static inline unsigned int ADDRS_PER_PAGE(struct f2fs_sb_info *sbi,
+ struct f2fs_node *node_blk, struct f2fs_node *inode_blk)
+{
+ unsigned int nblocks;
+
+ if (IS_INODE(node_blk))
+ return ADDRS_PER_INODE(&node_blk->i);
+
+ if (!inode_blk) {
+ struct node_info ni;
+ nid_t ino = le32_to_cpu(node_blk->footer.ino);
+
+ /* one block suffices: dev_read_block() fills exactly BLOCK_SZ */
+ inode_blk = calloc(BLOCK_SZ, 1);
+ ASSERT(inode_blk);
+
+ get_node_info(sbi, ino, &ni);
+ ASSERT(dev_read_block(inode_blk, ni.blk_addr) >= 0);
+ nblocks = ADDRS_PER_BLOCK(&inode_blk->i);
+ free(inode_blk);
+ } else {
+ nblocks = ADDRS_PER_BLOCK(&inode_blk->i);
+ }
+ return nblocks;
+}
+
static inline __le32 *blkaddr_in_inode(struct f2fs_node *node)
{
return node->i.i_addr + get_extra_isize(node);
free(index_node);
index_node = (dn.node_blk == dn.inode_blk) ?
NULL : dn.node_blk;
- remained_blkentries = ADDRS_PER_PAGE(dn.node_blk);
+ remained_blkentries = ADDRS_PER_PAGE(sbi,
+ dn.node_blk, dn.inode_blk);
}
ASSERT(remained_blkentries > 0);
free(index_node);
index_node = (dn.node_blk == dn.inode_blk) ?
NULL : dn.node_blk;
- remained_blkentries = ADDRS_PER_PAGE(dn.node_blk);
+ remained_blkentries = ADDRS_PER_PAGE(sbi,
+ dn.node_blk, dn.inode_blk);
}
ASSERT(remained_blkentries > 0);
#define NULL_ADDR 0x0U
#define NEW_ADDR -1U
+#define COMPRESS_ADDR -2U
#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
#define F2FS_FEATURE_VERITY 0x0400 /* reserved */
#define F2FS_FEATURE_SB_CHKSUM 0x0800
#define F2FS_FEATURE_CASEFOLD 0x1000
+#define F2FS_FEATURE_COMPRESSION 0x2000
#define MAX_VOLUME_NAME 512
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
__get_extra_isize(inode))
#define ADDRS_PER_INODE(i) addrs_per_inode(i)
-#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+#define ADDRS_PER_BLOCK(i) addrs_per_block(i)
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
#define IS_CASEFOLDED(dir) ((dir)->i_flags & F2FS_CASEFOLD_FL)
+/*
+ * inode flags
+ */
+#define F2FS_COMPR_FL 0x00000004 /* Compress file */
struct f2fs_inode {
__le16 i_mode; /* file mode */
__u8 i_advise; /* file hints */
__le32 i_inode_checksum;/* inode meta checksum */
__le64 i_crtime; /* creation time */
__le32 i_crtime_nsec; /* creation time in nano scale */
+ __le64 i_compr_blocks; /* # of compressed blocks */
+ __u8 i_compress_algrithm; /* compression algorithm (NOTE(review): on-disk field name has a typo, "algrithm"; fixing it needs a tree-wide rename) */
+ __u8 i_log_cluster_size; /* log2 of compress cluster size */
+ __le16 i_padding; /* padding */
__le32 i_extra_end[0]; /* for attribute size calculation */
} __attribute__((packed));
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
struct direct_node {
- __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */
+ __le32 addr[DEF_ADDRS_PER_BLOCK]; /* array of data block address */
} __attribute__((packed));
struct indirect_node {
extern int utf16_to_utf8(char *, const u_int16_t *, size_t, size_t);
extern int log_base_2(u_int32_t);
extern unsigned int addrs_per_inode(struct f2fs_inode *);
+extern unsigned int addrs_per_block(struct f2fs_inode *);
extern __u32 f2fs_inode_chksum(struct f2fs_node *);
extern __u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *);
extern int write_inode(struct f2fs_node *, u64);
{ "verity", F2FS_FEATURE_VERITY }, /* reserved */ \
{ "sb_checksum", F2FS_FEATURE_SB_CHKSUM }, \
{ "casefold", F2FS_FEATURE_CASEFOLD }, \
+ { "compression", F2FS_FEATURE_COMPRESSION }, \
{ NULL, 0x0}, \
};
return __f2fs_dentry_hash(name, len);
}
+/* Round @addrs down to a multiple of @size (whole compress clusters) */
+#define ALIGN_DOWN(addrs, size) (((addrs) / (size)) * (size))
unsigned int addrs_per_inode(struct f2fs_inode *i)
{
- return CUR_ADDRS_PER_INODE(i) - get_inline_xattr_addrs(i);
+ unsigned int addrs = CUR_ADDRS_PER_INODE(i) - get_inline_xattr_addrs(i);
+
+ /*
+  * Compressed inodes only use a whole number of clusters per node.
+  * NOTE(review): i_log_cluster_size is read straight from the on-disk
+  * inode; a corrupt value >= 31 makes the shift below undefined, and an
+  * oversized cluster yields 0 addrs -- confirm callers tolerate this or
+  * add a sanity check on the field.
+  */
+ if (!(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
+ return addrs;
+ return ALIGN_DOWN(addrs, 1 << i->i_log_cluster_size);
+}
+
+/* Address slots in a direct node; cluster-aligned for compressed inodes */
+unsigned int addrs_per_block(struct f2fs_inode *i)
+{
+ if (!(le32_to_cpu(i->i_flags) & F2FS_COMPR_FL))
+ return DEF_ADDRS_PER_BLOCK;
+ return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1 << i->i_log_cluster_size);
}
/*
if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
size = offsetof(struct f2fs_inode, i_crtime);
if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME))
+ size = offsetof(struct f2fs_inode, i_compr_blocks);
+ if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION))
size = offsetof(struct f2fs_inode, i_extra_end);
return size - F2FS_EXTRA_ISIZE_OFFSET;
raw_node->i.i_crtime_nsec = 0;
}
+ if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+ raw_node->i.i_compress_algrithm = 0;
+ raw_node->i.i_log_cluster_size = 0;
+ raw_node->i.i_padding = 0;
+ }
+
data_blk_nor = get_sb(main_blkaddr) +
c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg;
raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blk_nor);
raw_node->i.i_crtime_nsec = 0;
}
+ if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+ raw_node->i.i_compress_algrithm = 0;
+ raw_node->i.i_log_cluster_size = 0;
+ raw_node->i.i_padding = 0;
+ }
+
data_blk_nor = f2fs_add_default_dentry_lpf();
if (data_blk_nor == 0) {
MSG(1, "\tError: Failed to add default dentries for lost+found!!!\n");
"enabled with extra attr feature\n");
exit(1);
}
+ if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
+ MSG(0, "\tInfo: compression feature should always be "
+ "enabled with extra attr feature\n");
+ exit(1);
+ }
}
if (optind >= argc) {