// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

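/* Satisfy a read of a hole or of data beyond EOF without issuing I/O:
 * zero-fill the page, mark it up to date and unlock it. */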
static int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

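/* Flush any remaining queued requests and account the bytes read
 * against the inode's read I/O statistics. */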
static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

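/* Release a read request once the last subrequest in its page group has
 * completed: unlock the page and, if it is now up to date, hand it to
 * fscache. */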
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
	struct page *page = req->wb_page;

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
		SetPageError(page);
	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(page))
			nfs_fscache_write_page(inode, page);
		unlock_page(page);
	}
	nfs_release_request(req);
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
};

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

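/* Per-header completion: zero any ranges the server did not return,
 * mark fully read pages up to date, and release every request that
 * was attached to the header. */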
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);
			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
out:
	hdr->release(hdr);
}

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(hdr);
}

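/* Error cleanup for reads that could not be sent: release every queued
 * request with the given error. */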
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

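/* Handle the result of a completed READ: trim good_bytes when the
 * server reports EOF, or resend from the new offset on a short read. */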
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

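/* Build an nfs_page request for @page and queue it on the pageio
 * descriptor, trying fscache first for non-synchronous inodes. */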
static int
readpage_async_filler(struct nfs_readdesc *desc, struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);

	if (!IS_SYNC(page->mapping->host)) {
		error = nfs_fscache_read_page(page->mapping->host, page);
		if (error == 0)
			goto out_unlock;
	}

	new = nfs_create_request(desc->ctx, page, 0, aligned_len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(&desc->pgio, new)) {
		nfs_list_remove_request(new);
		error = desc->pgio.pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
out:
	return error;
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct nfs_readdesc desc;
	struct inode *inode = page_file_mapping(page)->host;
	int ret;

	trace_nfs_aop_readpage(inode, page);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(folio_size(folio));

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	ret = nfs_wb_page(inode, page);
	if (ret)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		ret = -EBADF;
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			goto out_unlock;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&desc.ctx->error, 0);
	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = readpage_async_filler(&desc, page);
	if (ret)
		goto out;

	nfs_pageio_complete_read(&desc.pgio);
	ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
	if (!ret) {
		ret = wait_on_page_locked_killable(page);
		if (!PageUptodate(page) && !ret)
			ret = xchg(&desc.ctx->error, 0);
	}
out:
	put_nfs_open_context(desc.ctx);
	trace_nfs_aop_readpage_done(inode, page, ret);
	return ret;
out_unlock:
	unlock_page(page);
	trace_nfs_aop_readpage_done(inode, page, ret);
	return ret;
}

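/* Readahead entry point: queue an asynchronous read for each page
 * supplied by the VM, then submit them as one batch. */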
void nfs_readahead(struct readahead_control *ractl)
{
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct nfs_readdesc desc;
	struct inode *inode = ractl->mapping->host;
	struct page *page;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			goto out;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&desc.pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((page = readahead_page(ractl)) != NULL) {
		ret = readpage_async_filler(&desc, page);
		put_page(page);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&desc.pgio);
	put_nfs_open_context(desc.ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header = nfs_readhdr_alloc,
	.rw_free_header = nfs_readhdr_free,
	.rw_done = nfs_readpage_done,
	.rw_result = nfs_readpage_result,
	.rw_initiate = nfs_initiate_read,
};