unsigned long addr, unsigned long end,
struct zap_details *details);
-int force_page_cache_readahead(struct address_space *, struct file *,
+void force_page_cache_readahead(struct address_space *, struct file *,
pgoff_t index, unsigned long nr_to_read);
-extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
- struct file *filp, pgoff_t offset, unsigned long nr_to_read,
+void __do_page_cache_readahead(struct address_space *, struct file *,
+ pgoff_t index, unsigned long nr_to_read,
unsigned long lookahead_size);
/*
* Submit IO for the read-ahead request in file_ra_state.
*/
-static inline unsigned long ra_submit(struct file_ra_state *ra,
+static inline void ra_submit(struct file_ra_state *ra,
struct address_space *mapping, struct file *filp)
{
- return __do_page_cache_readahead(mapping, filp,
- ra->start, ra->size, ra->async_size);
+ __do_page_cache_readahead(mapping, filp,
+ ra->start, ra->size, ra->async_size);
}
/**
* __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
* the pages first, then submits them for I/O. This avoids the very bad
* behaviour which would occur if page allocations are causing VM writeback.
* We really don't want to intermingle reads and writes like that.
- *
- * Returns the number of pages requested, or the maximum amount of I/O allowed.
*/
-unsigned int __do_page_cache_readahead(struct address_space *mapping,
+void __do_page_cache_readahead(struct address_space *mapping,
struct file *filp, pgoff_t offset, unsigned long nr_to_read,
unsigned long lookahead_size)
{
gfp_t gfp_mask = readahead_gfp_mask(mapping);
if (isize == 0)
- goto out;
+ return;
end_index = ((isize - 1) >> PAGE_SHIFT);
if (nr_pages)
read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
BUG_ON(!list_empty(&page_pool));
-out:
- return nr_pages;
}
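The comment above describes the two phases of __do_page_cache_readahead(): gather
every page of the request first, and only then hand the whole batch over for I/O
in one step, so reads never interleave with allocation-triggered writeback. The
following standalone C sketch only illustrates that batch-then-submit pattern;
batch_readahead() and submit_batch() are made-up names, not kernel functions.

/*
 * Userspace illustration of "collect everything, then submit once".
 * No kernel APIs are used; names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct page_req {
	unsigned long index;	/* page offset within the file */
};

/* Phase 2: submit the whole batch at once (stands in for read_pages()). */
static void submit_batch(struct page_req *batch, unsigned long nr)
{
	printf("submitting %lu pages: %lu..%lu\n",
	       nr, batch[0].index, batch[nr - 1].index);
}

/* Phase 1: collect every page of the request before any I/O starts. */
static void batch_readahead(unsigned long index, unsigned long nr_to_read)
{
	struct page_req *batch = calloc(nr_to_read, sizeof(*batch));
	unsigned long nr = 0, i;

	if (!batch)
		return;
	for (i = 0; i < nr_to_read; i++)
		batch[nr++].index = index + i;
	if (nr)
		submit_batch(batch, nr);	/* reads only begin here */
	free(batch);
}

int main(void)
{
	batch_readahead(128, 32);
	return 0;
}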
/*
* Chunk the readahead into 2 megabyte units, so that we don't pin too much
* memory at once.
*/
-int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t offset, unsigned long nr_to_read)
+void force_page_cache_readahead(struct address_space *mapping,
+ struct file *filp, pgoff_t offset, unsigned long nr_to_read)
{
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
struct file_ra_state *ra = &filp->f_ra;
unsigned long max_pages;
if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
- return -EINVAL;
+ return;
/*
* If the request exceeds the readahead window, allow the read to
offset += this_chunk;
nr_to_read -= this_chunk;
}
- return 0;
}
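force_page_cache_readahead() walks a large request in fixed 2 megabyte steps, as
the loop tail above (offset += this_chunk; nr_to_read -= this_chunk) shows. The
sketch below reproduces just that chunking arithmetic in userspace, assuming 4KiB
pages (512 pages per chunk); chunked_readahead() is a hypothetical name, and in
the kernel each chunk is fed to __do_page_cache_readahead().

/*
 * Standalone illustration of the 2MB chunking loop.  Not kernel code.
 */
#include <stdio.h>

#define PAGE_SHIFT	12				/* assume 4KiB pages */
#define CHUNK_PAGES	((2UL * 1024 * 1024) >> PAGE_SHIFT)	/* 512 pages */

static void chunked_readahead(unsigned long offset, unsigned long nr_to_read)
{
	while (nr_to_read) {
		unsigned long this_chunk = CHUNK_PAGES;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;

		printf("read %lu pages at offset %lu\n", this_chunk, offset);

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
}

int main(void)
{
	/* A 5000-page request becomes 512 + 512 + ... + 392. */
	chunked_readahead(0, 5000);
	return 0;
}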
/*
* A minimal readahead algorithm for trivial sequential/random reads.
*/
-static unsigned long
-ondemand_readahead(struct address_space *mapping,
- struct file_ra_state *ra, struct file *filp,
- bool hit_readahead_marker, pgoff_t offset,
- unsigned long req_size)
+static void ondemand_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *filp,
+ bool hit_readahead_marker, pgoff_t offset,
+ unsigned long req_size)
{
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
unsigned long max_pages = ra->ra_pages;
rcu_read_unlock();
if (!start || start - offset > max_pages)
- return 0;
+ return;
ra->start = start;
ra->size = start - offset; /* old async_size */
* standalone, small random read
* Read as is, and do not pollute the readahead state.
*/
- return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+ __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+ return;
initial_readahead:
ra->start = offset;
}
}
- return ra_submit(ra, mapping, filp);
+ ra_submit(ra, mapping, filp);
}
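ondemand_readahead() only computes the readahead window; the final ra_submit()
call issues it from ra->start, ra->size and ra->async_size. The userspace sketch
below shows how those three fields describe the window and where the
async-readahead marker would fall; the struct and helper here are illustrative
stand-ins, not the kernel's file_ra_state or ra_submit().

/*
 * 'start' is the first page of the window, 'size' its length in pages, and
 * 'async_size' the distance from the window end at which the marked page
 * sits, so that hitting it triggers the next (asynchronous) readahead.
 * Field names mirror file_ra_state, but this is not kernel code.
 */
#include <stdio.h>

struct ra_state {
	unsigned long start;		/* first page in the window */
	unsigned long size;		/* number of pages in the window */
	unsigned long async_size;	/* marker distance from the window end */
};

static void ra_submit_demo(const struct ra_state *ra)
{
	unsigned long marker = ra->start + ra->size - ra->async_size;

	printf("read pages %lu..%lu, readahead marker at page %lu\n",
	       ra->start, ra->start + ra->size - 1, marker);
}

int main(void)
{
	/* e.g. the initial_readahead case: the window starts at the request. */
	struct ra_state ra = { .start = 100, .size = 32, .async_size = 16 };

	ra_submit_demo(&ra);
	return 0;
}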
/**