This patch uses a percpu_counter for sbi->alloc_valid_block_count, so the allocated block count is updated with cheap per-cpu operations instead of under sbi->stat_lock.
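
For reference, a minimal sketch of the percpu_counter lifecycle this patch follows: initialize the counter at mount time, bump it outside the stat_lock on allocation, fold the per-cpu deltas when an accurate total is needed, and destroy it on unmount. The standalone context and helper names (counter_setup() and friends) are illustrative only; in the patch the counter is embedded in struct f2fs_sb_info.

  #include <linux/gfp.h>
  #include <linux/percpu_counter.h>

  /* Illustrative stand-in for the counter embedded in struct f2fs_sb_info. */
  static struct percpu_counter alloc_valid_block_count;

  static int counter_setup(void)
  {
          /* allocates the per-cpu storage; can fail, so propagate the error */
          return percpu_counter_init(&alloc_valid_block_count, 0, GFP_KERNEL);
  }

  static void counter_account(s64 blocks)
  {
          /* cheap per-cpu update, no spinlock required */
          percpu_counter_add(&alloc_valid_block_count, blocks);
  }

  static s64 counter_total(void)
  {
          /* folds all per-cpu deltas into a non-negative total */
          return percpu_counter_sum_positive(&alloc_valid_block_count);
  }

  static void counter_teardown(void)
  {
          percpu_counter_destroy(&alloc_valid_block_count);
  }
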
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
/* update user_block_counts */
sbi->last_valid_block_count = sbi->total_valid_block_count;
- sbi->alloc_valid_block_count = 0;
+ percpu_counter_set(&sbi->alloc_valid_block_count, 0);
/* Here, we only have one bio having CP pack */
sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
block_t user_block_count; /* # of user blocks */
block_t total_valid_block_count; /* # of valid blocks */
- block_t alloc_valid_block_count; /* # of allocated blocks */
block_t discard_blks; /* discard command candidats */
block_t last_valid_block_count; /* for recovery */
u32 s_next_generation; /* for NFS support */
/* # of pages, see count_type */
struct percpu_counter nr_pages[NR_COUNT_TYPE];
+ /* # of allocated blocks */
+ struct percpu_counter alloc_valid_block_count;
struct f2fs_mount_info mount_opt; /* mount options */
inode->i_blocks += *count;
sbi->total_valid_block_count =
sbi->total_valid_block_count + (block_t)(*count);
- sbi->alloc_valid_block_count += (block_t)(*count);
spin_unlock(&sbi->stat_lock);
+
+ percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
return true;
}
if (inode)
inode->i_blocks++;
- sbi->alloc_valid_block_count++;
sbi->total_valid_node_count++;
sbi->total_valid_block_count++;
spin_unlock(&sbi->stat_lock);
+ percpu_counter_inc(&sbi->alloc_valid_block_count);
return true;
}
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
- if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
- > sbi->user_block_count)
+ s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
+
+ if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
return false;
return true;
}
for (i = 0; i < NR_COUNT_TYPE; i++)
percpu_counter_destroy(&sbi->nr_pages[i]);
+ percpu_counter_destroy(&sbi->alloc_valid_block_count);
}
static void f2fs_put_super(struct super_block *sb)
if (err)
return err;
}
- return 0;
+
+ return percpu_counter_init(&sbi->alloc_valid_block_count, 0,
+ GFP_KERNEL);
}
/*
sbi->total_valid_block_count =
le64_to_cpu(sbi->ckpt->valid_block_count);
sbi->last_valid_block_count = sbi->total_valid_block_count;
- sbi->alloc_valid_block_count = 0;
+
for (i = 0; i < NR_INODE_TYPE; i++) {
INIT_LIST_HEAD(&sbi->inode_list[i]);
spin_lock_init(&sbi->inode_lock[i]);