From 74c8164e1cdb1eb22f1d49d54e515e81821a8ad0 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 17 Aug 2018 15:45:36 -0700
Subject: [PATCH] mpage: mpage_readpages() should submit IO as read-ahead

a_ops->readpages() is only ever used for read-ahead, yet we don't flag
the IO being submitted as such. Fix that up. Any file system that uses
mpage_readpages() as its ->readpages() implementation will now get this
right.

Since we're passing in whether the IO is read-ahead or not, we don't
need to pass in the 'gfp' separately, as it is dependent on the IO
being read-ahead. Kill off that member.

Add some documentation notes on ->readpages() being purely for
read-ahead.

Link: http://lkml.kernel.org/r/20180621010725.17813-3-axboe@kernel.dk
Signed-off-by: Jens Axboe
Reviewed-by: Andrew Morton
Cc: Al Viro
Cc: Chris Mason
Cc: Christoph Hellwig
Cc: Theodore Ts'o
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/f2fs/data.c     |  5 +++++
 fs/mpage.c         | 29 +++++++++++++++++++----------
 include/linux/fs.h |  4 ++++
 3 files changed, 28 insertions(+), 10 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 8f931d699287..b7c9b58acf3e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1421,6 +1421,11 @@ out:
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
+ *
+ * Note that the aops->readpages() function is ONLY used for read-ahead. If
+ * this function ever deviates from doing just read-ahead, it should either
+ * use ->readpage() or do the necessary surgery to decouple ->readpages()
+ * from read-ahead.
  */
 static int f2fs_mpage_readpages(struct address_space *mapping,
 			struct list_head *pages, struct page *page,
diff --git a/fs/mpage.c b/fs/mpage.c
index 6dc90e456abf..c820dc9bebab 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -137,11 +137,11 @@ struct mpage_readpage_args {
 	struct bio *bio;
 	struct page *page;
 	unsigned int nr_pages;
+	bool is_readahead;
 	sector_t last_block_in_bio;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block;
 	get_block_t *get_block;
-	gfp_t gfp;
 };
 
 /*
@@ -170,8 +170,18 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	struct block_device *bdev = NULL;
 	int length;
 	int fully_mapped = 1;
+	int op_flags;
 	unsigned nblocks;
 	unsigned relative_block;
+	gfp_t gfp;
+
+	if (args->is_readahead) {
+		op_flags = REQ_RAHEAD;
+		gfp = readahead_gfp_mask(page->mapping);
+	} else {
+		op_flags = 0;
+		gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+	}
 
 	if (page_has_buffers(page))
 		goto confused;
@@ -284,7 +294,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	 * This page will go to BIO. Do we need to send this BIO off first?
 	 */
 	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
-		args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
 
 alloc_new:
 	if (args->bio == NULL) {
@@ -296,14 +306,14 @@ alloc_new:
 		args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
 					min_t(int, args->nr_pages,
 							BIO_MAX_PAGES),
-					args->gfp);
+					gfp);
 		if (args->bio == NULL)
 			goto confused;
 	}
 
 	length = first_hole << blkbits;
 	if (bio_add_page(args->bio, page, length, 0) < length) {
-		args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
 		goto alloc_new;
 	}
 
@@ -311,7 +321,7 @@ alloc_new:
 	nblocks = map_bh->b_size >> blkbits;
 	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
 	    (first_hole != blocks_per_page))
-		args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
 	else
 		args->last_block_in_bio = blocks[blocks_per_page - 1];
 out:
@@ -319,7 +329,7 @@ out:
 
 confused:
 	if (args->bio)
-		args->bio = mpage_bio_submit(REQ_OP_READ, 0, args->bio);
+		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
 	if (!PageUptodate(page))
 		block_read_full_page(page, args->get_block);
 	else
@@ -377,7 +387,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 {
 	struct mpage_readpage_args args = {
 		.get_block = get_block,
-		.gfp = readahead_gfp_mask(mapping),
+		.is_readahead = true,
 	};
 	unsigned page_idx;
 
@@ -388,7 +398,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping,
 					page->index,
-					args.gfp)) {
+					readahead_gfp_mask(mapping))) {
 			args.page = page;
 			args.nr_pages = nr_pages - page_idx;
 			args.bio = do_mpage_readpage(&args);
@@ -397,7 +407,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	}
 	BUG_ON(!list_empty(pages));
 	if (args.bio)
-		mpage_bio_submit(REQ_OP_READ, 0, args.bio);
+		mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
 	return 0;
 }
 EXPORT_SYMBOL(mpage_readpages);
@@ -411,7 +421,6 @@ int mpage_readpage(struct page *page, get_block_t get_block)
 		.page = page,
 		.nr_pages = 1,
 		.get_block = get_block,
-		.gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL),
 	};
 
 	args.bio = do_mpage_readpage(&args);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9d319f1f66f6..a9242f336f02 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -344,6 +344,10 @@ struct address_space_operations {
 	/* Set a page dirty. Return true if this dirtied it */
 	int (*set_page_dirty)(struct page *page);
 
+	/*
+	 * Reads in the requested pages. Unlike ->readpage(), this is
+	 * PURELY used for read-ahead!
+	 */
 	int (*readpages)(struct file *filp, struct address_space *mapping,
 			struct list_head *pages, unsigned nr_pages);
 
-- 
2.34.1
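
For illustration only, not part of the patch: a minimal sketch of how an mpage-based
file system picks up this change simply by wiring mpage_readpages() into its
->readpages() hook, loosely modeled on file systems like ext2 that already do this.
The foo_* names and the trivial foo_get_block() mapping are hypothetical placeholders;
a real implementation supplies its own block mapping.

/* Illustrative sketch only; foo_* is a hypothetical file system. */
#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>

/* Toy 1:1 block mapping, purely for illustration. */
static int foo_get_block(struct inode *inode, sector_t iblock,
			 struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

/* Single-page reads still go through mpage_readpage(). */
static int foo_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, foo_get_block);
}

/*
 * Read-ahead path: mpage_readpages() now sets args.is_readahead, so the
 * bios it builds are submitted with REQ_RAHEAD and allocated with
 * readahead_gfp_mask(). No change is needed on this side.
 */
static int foo_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, foo_get_block);
}

static const struct address_space_operations foo_aops = {
	.readpage	= foo_readpage,
	.readpages	= foo_readpages,
};

With the patch applied, the read-ahead path above gets its IO flagged as read-ahead
automatically, since do_mpage_readpage() derives both op_flags and the gfp mask from
args->is_readahead rather than from a caller-supplied gfp member.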