This patch should resolve the bugs reported by the sparse tool.
Initial reports were written by the "kbuild test robot" managed by fengguang.wu.
On my local machines, I've also tested by running:
> make C=2 CF="-D__CHECK_ENDIAN__"
Accordingly, I've found many warnings and bugs related to endianness
conversion, and I've fixed all of them in this patch.
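For reference, below is a minimal sketch of the bug class involved (the
reduced struct here is hypothetical; the fixed helper mirrors the
set_ckpt_flags() helper this patch adds to f2fs.h). On-disk fields are
declared with bitwise types such as __le32, so any CPU-order operation
on them must convert with le32_to_cpu()/cpu_to_le32():

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* hypothetical reduced checkpoint struct, for illustration only */
	struct ckpt_sketch {
		__le32 ckpt_flags;	/* little-endian on disk */
	};

	static void set_flags_buggy(struct ckpt_sketch *cp, unsigned int f)
	{
		/* sparse (-D__CHECK_ENDIAN__): bitwise op on restricted __le32 */
		cp->ckpt_flags |= f;
	}

	static void set_flags_fixed(struct ckpt_sketch *cp, unsigned int f)
	{
		/* convert to CPU order, operate, then convert back */
		unsigned int flags = le32_to_cpu(cp->ckpt_flags);

		cp->ckpt_flags = cpu_to_le32(flags | f);
	}

The same convert-operate-convert pattern is applied throughout the
changes below.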
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
{
block_t start_blk, orphan_blkaddr, i, j;
- if (!(F2FS_CKPT(sbi)->ckpt_flags & CP_ORPHAN_PRESENT_FLAG))
+ if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
return 0;
sbi->por_doing = 1;
f2fs_put_page(page, 1);
}
/* clear Orphan Flag */
- F2FS_CKPT(sbi)->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG);
+ clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
sbi->por_doing = 0;
return 0;
}
pre_version = le64_to_cpu(cp_block->checkpoint_ver);
/* Read the 2nd cp block in this CP pack */
- cp_addr += le64_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+ cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
cp_page_2 = get_meta_page(sbi, cp_addr);
cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
block_t start_blk;
struct page *cp_page;
unsigned int data_sum_blocks, orphan_blocks;
+ unsigned int crc32 = 0;
void *kaddr;
- __u32 crc32 = 0;
int i;
/* Flush all the NAT/SIT pages */
/* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = npages_for_summary_flush(sbi);
if (data_sum_blocks < 3)
- ckpt->ckpt_flags |= CP_COMPACT_SUM_FLAG;
+ set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
else
- ckpt->ckpt_flags &= (~CP_COMPACT_SUM_FLAG);
+ clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
/ F2FS_ORPHANS_PER_BLOCK;
- ckpt->cp_pack_start_sum = 1 + orphan_blocks;
- ckpt->cp_pack_total_block_count = 2 + data_sum_blocks + orphan_blocks;
+ ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks);
if (is_umount) {
- ckpt->ckpt_flags |= CP_UMOUNT_FLAG;
- ckpt->cp_pack_total_block_count += NR_CURSEG_NODE_TYPE;
+ set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+ data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE);
} else {
- ckpt->ckpt_flags &= (~CP_UMOUNT_FLAG);
+ clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
+ data_sum_blocks + orphan_blocks);
}
if (sbi->n_orphans)
- ckpt->ckpt_flags |= CP_ORPHAN_PRESENT_FLAG;
+ set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
else
- ckpt->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG);
+ clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
/* update SIT/NAT bitmap */
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
- *(__u32 *)((unsigned char *)ckpt +
+ *(__le32 *)((unsigned char *)ckpt +
le32_to_cpu(ckpt->checksum_offset))
= cpu_to_le32(crc32);
sbi->alloc_valid_block_count = 0;
/* Here, we only have one bio having CP pack */
- if (sbi->ckpt->ckpt_flags & CP_ERROR_FLAG)
+ if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))
sbi->sb->s_flags |= MS_RDONLY;
else
sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
#define MAX_DESIRED_PAGES_WP 4096
-int f2fs_write_data_pages(struct address_space *mapping,
+static int f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
static LIST_HEAD(f2fs_stat_list);
static struct dentry *debugfs_root;
-void update_general_status(struct f2fs_sb_info *sbi)
+static void update_general_status(struct f2fs_sb_info *sbi)
{
struct f2fs_stat_info *si = sbi->stat_info;
int i;
if (le16_to_cpu(de->name_len) != namelen)
return false;
- if (le32_to_cpu(de->hash_code) != namehash)
+ if (de->hash_code != namehash)
return false;
return true;
nbucket = dir_buckets(level);
nblock = bucket_blocks(level);
- bidx = dir_block_index(level, namehash % nbucket);
+ bidx = dir_block_index(level, le32_to_cpu(namehash) % nbucket);
end_block = bidx + nblock;
for (; bidx < end_block; bidx++) {
nbucket = dir_buckets(level);
nblock = bucket_blocks(level);
- bidx = dir_block_index(level, (dentry_hash % nbucket));
+ bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));
for (block = bidx; block <= (bidx + nblock - 1); block++) {
mutex_lock_op(sbi, DENTRY_OPS);
wait_on_page_writeback(dentry_page);
de = &dentry_blk->dentry[bit_pos];
- de->hash_code = cpu_to_le32(dentry_hash);
+ de->hash_code = dentry_hash;
de->name_len = cpu_to_le16(namelen);
memcpy(dentry_blk->filename[bit_pos], name, namelen);
de->ino = cpu_to_le32(inode->i_ino);
sbi->s_dirty = 0;
}
+static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ return ckpt_flags & f;
+}
+
+static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ ckpt_flags |= f;
+ cp->ckpt_flags = cpu_to_le32(ckpt_flags);
+}
+
+static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ ckpt_flags &= (~f);
+ cp->ckpt_flags = cpu_to_le32(ckpt_flags);
+}
+
static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t)
{
mutex_lock_nested(&sbi->fs_lock[t], t);
static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- int offset = (flag == NAT_BITMAP) ? ckpt->sit_ver_bitmap_bytesize : 0;
+ int offset = (flag == NAT_BITMAP) ?
+ le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
return &ckpt->sit_nat_version_bitmap + offset;
}
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);
- start_addr = le64_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
/*
* odd numbered checkpoint should be at cp segment 0
hash = buf[0];
minor_hash = buf[1];
- f2fs_hash = hash;
- f2fs_hash &= ~F2FS_HASH_COL_BIT;
+ f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
return f2fs_hash;
}
memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
dst->i.i_size = 0;
- dst->i.i_blocks = 1;
- dst->i.i_links = 1;
+ dst->i.i_blocks = cpu_to_le64(1);
+ dst->i.i_links = cpu_to_le32(1);
dst->i.i_xattr_nid = 0;
new_ni = old_ni;
void *kaddr = page_address(page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
rn->footer.cp_ver = ckpt->checkpoint_ver;
- rn->footer.next_blkaddr = blkaddr;
+ rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
static inline nid_t ino_of_node(struct page *node_page)
struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
struct f2fs_inode *raw_inode = &(raw_node->i);
- inode->i_mode = le32_to_cpu(raw_inode->i_mode);
+ inode->i_mode = le16_to_cpu(raw_inode->i_mode);
i_size_write(inode, le64_to_cpu(raw_inode->i_size));
inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_atime);
inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
SetPageError(page);
if (page->mapping)
set_bit(AS_EIO, &page->mapping->flags);
- p->sbi->ckpt->ckpt_flags |= CP_ERROR_FLAG;
+ set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
set_page_dirty(page);
}
end_page_writeback(page);
segno = le32_to_cpu(ckpt->cur_data_segno[type]);
blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
CURSEG_HOT_DATA]);
- if (ckpt->ckpt_flags & CP_UMOUNT_FLAG)
+ if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
else
blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
segno = le32_to_cpu(ckpt->cur_node_segno[type -
CURSEG_HOT_NODE]);
blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
CURSEG_HOT_NODE]);
- if (ckpt->ckpt_flags & CP_UMOUNT_FLAG)
+ if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
type - CURSEG_HOT_NODE);
else
sum = (struct f2fs_summary_block *)page_address(new);
if (IS_NODESEG(type)) {
- if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) {
+ if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
struct f2fs_summary *ns = &sum->entries[0];
int i;
for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
{
int type = CURSEG_HOT_DATA;
- if (sbi->ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) {
+ if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
/* restore for compacted data summary */
if (read_compacted_summaries(sbi))
return -EINVAL;
void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
- if (sbi->ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG)
+ if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
write_compacted_summaries(sbi, start_blk);
else
write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
- if (sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG)
+ if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
return;
}
kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
-void f2fs_destroy_inode(struct inode *inode)
+static void f2fs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, f2fs_i_callback);
}
if (sanity_check_raw_super(raw_super))
goto free_sb_buf;
- sb->s_maxbytes = max_file_size(raw_super->log_blocksize);
+ sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
sb->s_max_links = F2FS_LINK_MAX;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
/* if there are any orphan nodes, free them */
err = -EINVAL;
- if (!(sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG) &&
+ if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) &&
recover_orphan_inodes(sbi))
goto free_node_inode;
}
/* recover fsynced data */
- if (!(sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG) &&
+ if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) &&
!test_opt(sbi, DISABLE_ROLL_FORWARD))
recover_fsync_data(sbi);
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
#define ENTRIES_IN_SUM 512
-#define SUMMARY_SIZE (sizeof(struct f2fs_summary))
-#define SUM_FOOTER_SIZE (sizeof(struct summary_footer))
+#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
+#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
/* a summary entry for a 4KB-sized block in a segment */
__u32 check_sum; /* summary checksum */
} __packed;
-#define SUM_JOURNAL_SIZE (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE -\
+#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
SUM_ENTRY_SIZE)
#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
sizeof(struct nat_journal_entry))