From: Matthew Wilcox (Oracle)
Date: Fri, 16 Oct 2020 03:06:28 +0000 (-0700)
Subject: mm/readahead: add page_cache_sync_ra and page_cache_async_ra
X-Git-Tag: v5.15~2654^2~118
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=fefa7c478fdafe71c64b5ddf817ac0271aed1146;p=platform%2Fkernel%2Flinux-starfive.git

mm/readahead: add page_cache_sync_ra and page_cache_async_ra

Reimplement page_cache_sync_readahead() and page_cache_async_readahead()
as wrappers around versions of the function which take a readahead_control
in preparation for making do_sync_mmap_readahead() pass down an RAC struct.

Signed-off-by: Matthew Wilcox (Oracle)
Signed-off-by: Andrew Morton
Cc: David Howells
Cc: Eric Biggers
Link: https://lkml.kernel.org/r/20200903140844.14194-8-willy@infradead.org
Signed-off-by: Linus Torvalds
---

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 37f209c..c77b7c31 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -761,16 +761,6 @@ int replace_page_cache_page(struct page *old, struct page *new,
 			gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct pagevec *pvec);
 
-#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
-
-void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
-		struct file *, pgoff_t index, unsigned long req_count);
-void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
-		struct file *, struct page *, pgoff_t index,
-		unsigned long req_count);
-void page_cache_ra_unbounded(struct readahead_control *,
-		unsigned long nr_to_read, unsigned long lookahead_count);
-
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
  * the page is new, so we can just run __SetPageLocked() against it.
@@ -818,6 +808,60 @@ struct readahead_control {
 		._index = i,						\
 	}
 
+#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
+
+void page_cache_ra_unbounded(struct readahead_control *,
+		unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
+		unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
+		struct page *, unsigned long req_count);
+
+/**
+ * page_cache_sync_readahead - generic file readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read. The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
+ */
+static inline
+void page_cache_sync_readahead(struct address_space *mapping,
+		struct file_ra_state *ra, struct file *file, pgoff_t index,
+		unsigned long req_count)
+{
+	DEFINE_READAHEAD(ractl, file, mapping, index);
+	page_cache_sync_ra(&ractl, ra, req_count);
+}
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @page: The page at @index which triggered the readahead call.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * is marked as PageReadahead; this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages.
+ */
+static inline
+void page_cache_async_readahead(struct address_space *mapping,
+		struct file_ra_state *ra, struct file *file,
+		struct page *page, pgoff_t index, unsigned long req_count)
+{
+	DEFINE_READAHEAD(ractl, file, mapping, index);
+	page_cache_async_ra(&ractl, ra, page, req_count);
+}
+
 /**
  * readahead_page - Get the next page to read.
  * @rac: The current readahead request.
diff --git a/mm/readahead.c b/mm/readahead.c
index 3115ced..620ac83 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -550,25 +550,9 @@ readit:
 	do_page_cache_ra(ractl, ra->size, ra->async_size);
 }
 
-/**
- * page_cache_sync_readahead - generic file readahead
- * @mapping: address_space which holds the pagecache and I/O vectors
- * @ra: file_ra_state which holds the readahead state
- * @filp: passed on to ->readpage() and ->readpages()
- * @index: Index of first page to be read.
- * @req_count: Total number of pages being read by the caller.
- *
- * page_cache_sync_readahead() should be called when a cache miss happened:
- * it will submit the read. The readahead logic may decide to piggyback more
- * pages onto the read request if access patterns suggest it will improve
- * performance.
- */
-void page_cache_sync_readahead(struct address_space *mapping,
-			       struct file_ra_state *ra, struct file *filp,
-			       pgoff_t index, unsigned long req_count)
+void page_cache_sync_ra(struct readahead_control *ractl,
+		struct file_ra_state *ra, unsigned long req_count)
 {
-	DEFINE_READAHEAD(ractl, filp, mapping, index);
-
 	/* no read-ahead */
 	if (!ra->ra_pages)
 		return;
@@ -577,38 +561,20 @@ void page_cache_sync_readahead(struct address_space *mapping,
 		return;
 
 	/* be dumb */
-	if (filp && (filp->f_mode & FMODE_RANDOM)) {
-		force_page_cache_ra(&ractl, req_count);
+	if (ractl->file && (ractl->file->f_mode & FMODE_RANDOM)) {
+		force_page_cache_ra(ractl, req_count);
 		return;
 	}
 
 	/* do read-ahead */
-	ondemand_readahead(&ractl, ra, false, req_count);
+	ondemand_readahead(ractl, ra, false, req_count);
 }
-EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
+EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
-/**
- * page_cache_async_readahead - file readahead for marked pages
- * @mapping: address_space which holds the pagecache and I/O vectors
- * @ra: file_ra_state which holds the readahead state
- * @filp: passed on to ->readpage() and ->readpages()
- * @page: The page at @index which triggered the readahead call.
- * @index: Index of first page to be read.
- * @req_count: Total number of pages being read by the caller.
- *
- * page_cache_async_readahead() should be called when a page is used which
- * is marked as PageReadahead; this is a marker to suggest that the application
- * has used up enough of the readahead window that we should start pulling in
- * more pages.
- */
-void
-page_cache_async_readahead(struct address_space *mapping,
-			   struct file_ra_state *ra, struct file *filp,
-			   struct page *page, pgoff_t index,
-			   unsigned long req_count)
+void page_cache_async_ra(struct readahead_control *ractl,
+		struct file_ra_state *ra, struct page *page,
+		unsigned long req_count)
 {
-	DEFINE_READAHEAD(ractl, filp, mapping, index);
-
 	/* no read-ahead */
 	if (!ra->ra_pages)
 		return;
@@ -624,16 +590,16 @@ page_cache_async_readahead(struct address_space *mapping,
 	/*
 	 * Defer asynchronous read-ahead on IO congestion.
 	 */
-	if (inode_read_congested(mapping->host))
+	if (inode_read_congested(ractl->mapping->host))
 		return;
 
 	if (blk_cgroup_congested())
 		return;
 
 	/* do read-ahead */
-	ondemand_readahead(&ractl, ra, true, req_count);
+	ondemand_readahead(ractl, ra, true, req_count);
 }
-EXPORT_SYMBOL_GPL(page_cache_async_readahead);
+EXPORT_SYMBOL_GPL(page_cache_async_ra);
 
 ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 {
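
For context, a minimal sketch of the calling convention this patch enables: a caller that builds its own readahead_control can invoke the new rac-based entry point directly instead of going through the page_cache_sync_readahead() wrapper, which is the pattern the commit message anticipates for do_sync_mmap_readahead() in a later patch. The function my_sync_mmap_readahead() below is hypothetical and only illustrates the convention; it is not part of this patch.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical caller, for illustration only. */
static void my_sync_mmap_readahead(struct file *file, struct file_ra_state *ra,
				   pgoff_t index, unsigned long req_count)
{
	/* Build the readahead_control once, near the top of the call chain... */
	DEFINE_READAHEAD(ractl, file, file->f_mapping, index);

	/* ...and pass it straight to the rac-based helper. */
	page_cache_sync_ra(&ractl, ra, req_count);
}

Existing callers need no change, since page_cache_sync_readahead() and page_cache_async_readahead() survive as static inline wrappers in pagemap.h that construct the readahead_control themselves.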