Previously, background GC submitted many 4KB read requests to load victim blocks
and/or their (i)node blocks.
...
f2fs_gc : f2fs_readpage: ino = 1, page_index = 0xb61, blkaddr = 0x3b964ed
f2fs_gc : block_rq_complete: 8,16 R () 499854968 + 8 [0]
f2fs_gc : f2fs_readpage: ino = 1, page_index = 0xb6f, blkaddr = 0x3b964ee
f2fs_gc : block_rq_complete: 8,16 R () 499854976 + 8 [0]
f2fs_gc : f2fs_readpage: ino = 1, page_index = 0xb79, blkaddr = 0x3b964ef
f2fs_gc : block_rq_complete: 8,16 R () 499854984 + 8 [0]
...
However, since many of these IOs are sequential, we can give the IO scheduler
a chance to merge them.
In order to do that, let's use blk_plug (see the sketch after the trace below).
...
f2fs_gc : f2fs_iget: ino = 143
f2fs_gc : f2fs_readpage: ino = 143, page_index = 0x1c6, blkaddr = 0x2e6ee
f2fs_gc : f2fs_iget: ino = 143
f2fs_gc : f2fs_readpage: ino = 143, page_index = 0x1c7, blkaddr = 0x2e6ef
<idle> : block_rq_complete: 8,16 R () 1519616 + 8 [0]
<idle> : block_rq_complete: 8,16 R () 1519848 + 8 [0]
<idle> : block_rq_complete: 8,16 R () 1520432 + 96 [0]
<idle> : block_rq_complete: 8,16 R () 1520536 + 104 [0]
<idle> : block_rq_complete: 8,16 R () 1521008 + 112 [0]
<idle> : block_rq_complete: 8,16 R () 1521440 + 152 [0]
<idle> : block_rq_complete: 8,16 R () 1521688 + 144 [0]
<idle> : block_rq_complete: 8,16 R () 1522128 + 192 [0]
<idle> : block_rq_complete: 8,16 R () 1523256 + 328 [0]
...
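The plugging pattern used throughout this patch is, roughly, the following
(a minimal sketch; submit_small_reads() is a placeholder, not a real f2fs
function):

	struct blk_plug plug;

	blk_start_plug(&plug);
	/* Requests submitted while plugged are held on a per-task list
	 * instead of being dispatched to the device one by one. */
	submit_small_reads();
	blk_finish_plug(&plug);	/* unplug: the IO scheduler can now merge
				 * adjacent requests into bigger ones */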
Note that this issue should also be addressed in the checkpoint and some
readahead flows.
Reviewed-by: Namjae Jeon <namjae.jeon@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
.nr_to_write = LONG_MAX,
.for_reclaim = 0,
};
+ struct blk_plug plug;
+
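+ /* Plug the queue so the IOs issued while flushing dents and
+ * nodes below can be merged by the IO scheduler before dispatch. */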
+ blk_start_plug(&plug);
+
retry_flush_dents:
mutex_lock_all(sbi);
sync_node_pages(sbi, 0, &wbc);
goto retry_flush_nodes;
}
+ blk_finish_plug(&plug);
}
static void unblock_operations(struct f2fs_sb_info *sbi)
return;
}
-struct page *find_data_page(struct inode *inode, pgoff_t index)
+struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct address_space *mapping = inode->i_mapping;
return page;
}
- err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
- wait_on_page_locked(page);
- if (!PageUptodate(page)) {
- f2fs_put_page(page, 0);
- return ERR_PTR(-EIO);
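+ /* With sync == false, submit the read as readahead (READA) and
+ * return without waiting; the page may not be Uptodate yet. */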
+ err = f2fs_readpage(sbi, page, dn.data_blkaddr,
+ sync ? READ_SYNC : READA);
+ if (sync) {
+ wait_on_page_locked(page);
+ if (!PageUptodate(page)) {
+ f2fs_put_page(page, 0);
+ return ERR_PTR(-EIO);
+ }
}
return page;
}
for (; bidx < end_block; bidx++) {
/* no need to allocate new dentry pages to all the indices */
- dentry_page = find_data_page(dir, bidx);
+ dentry_page = find_data_page(dir, bidx, true);
if (IS_ERR(dentry_page)) {
room = true;
continue;
*/
int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
-struct page *find_data_page(struct inode *, pgoff_t);
+struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
if (!offset)
return;
- page = find_data_page(inode, from >> PAGE_CACHE_SHIFT);
+ page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
if (IS_ERR(page))
return;
next_step:
entry = sum;
+
for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
nid_t nid = le32_to_cpu(entry->nid);
struct page *node_page;
f2fs_put_page(node_page, 1);
stat_inc_node_blk_count(sbi, 1);
}
+
if (initial) {
initial = false;
goto next_step;
next_step:
entry = sum;
+
for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
struct page *data_page;
struct inode *inode;
continue;
data_page = find_data_page(inode,
- start_bidx + ofs_in_node);
+ start_bidx + ofs_in_node, false);
if (IS_ERR(data_page))
goto next_iput;
next_iput:
iput(inode);
}
+
if (++phase < 4)
goto next_step;
{
struct page *sum_page;
struct f2fs_summary_block *sum;
+ struct blk_plug plug;
/* read segment summary of victim */
sum_page = get_sum_page(sbi, segno);
if (IS_ERR(sum_page))
return;
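+ /* Plug across the whole GC pass so the many small reads of
+ * victim node/data blocks below can be merged. */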
+ blk_start_plug(&plug);
+
sum = page_address(sum_page);
switch (GET_SUM_TYPE((&sum->footer))) {
gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
break;
}
+ blk_finish_plug(&plug);
+
stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
stat_inc_call_count(sbi->stat_info);
{
struct address_space *mapping = sbi->meta_inode->i_mapping;
struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct blk_plug plug;
struct page *page;
pgoff_t index;
int i;
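+ /* Plug so the readahead of consecutive NAT pages below is
+ * merged into larger requests. */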
+ blk_start_plug(&plug);
+
for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
if (nid >= nm_i->max_nid)
nid = 0;
f2fs_put_page(page, 0);
}
+ blk_finish_plug(&plug);
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
struct address_space *mapping = sbi->node_inode->i_mapping;
+ struct blk_plug plug;
struct page *page;
int err, i, end;
nid_t nid;
else if (err == LOCKED_PAGE)
goto page_hit;
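+ /* Plug so the sibling node readahead below can be merged. */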
+ blk_start_plug(&plug);
+
/* Then, try readahead for siblings of the desired node */
end = start + MAX_RA_NODE;
end = min(end, NIDS_PER_BLOCK);
ra_node_page(sbi, nid);
}
+ blk_finish_plug(&plug);
+
lock_page(page);
page_hit: