NFS: Fix fscache read from NFS after cache error
author Dave Wysochanski <dwysocha@redhat.com>
Tue, 29 Jun 2021 17:13:57 +0000 (13:13 -0400)
committer Trond Myklebust <trond.myklebust@hammerspace.com>
Thu, 8 Jul 2021 18:03:26 +0000 (14:03 -0400)
Earlier commits refactored some NFS read code and removed
nfs_readpage_async(), but neglected to properly fix up
nfs_readpage_from_fscache_complete().  The code path is
only hit when something unusual occurs with the cachefiles
backing filesystem, such as an IO error or while a cookie
is being invalidated.

Mark the page with PG_checked if the fscache IO completes in error,
unlock the page, and let the VM decide to re-issue based on
PG_uptodate.  When the VM reissues the readpage, PG_checked
allows us to skip over fscache and read from the server.
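
Conceptually the fix has two halves: the fscache read-completion callback
tags a failed page with PG_checked, and the next ->readpage() attempt sees
the tag and bypasses fscache entirely.  A simplified sketch of the resulting
logic in fs/nfs/fscache.c (function names are the ones from the diff below;
tracing and the full handling of fscache_read_or_alloc_page()'s return codes
are omitted here):

    /*
     * Completion callback: on a cachefiles I/O error, tag the page with
     * PG_checked instead of PG_uptodate, then unlock it.  The VM sees the
     * page is still not uptodate and reissues ->readpage().
     */
    static void nfs_readpage_from_fscache_complete(struct page *page,
                                                   void *context, int error)
    {
            if (!error)
                    SetPageUptodate(page);
            else
                    SetPageChecked(page);
            unlock_page(page);
    }

    /*
     * On the reissued readpage, a PG_checked page means the cache already
     * failed once: clear the flag and return 1 so the caller falls back to
     * reading from the NFS server instead of retrying fscache.
     */
    static int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
                                           struct inode *inode,
                                           struct page *page)
    {
            int ret;

            if (PageChecked(page)) {
                    ClearPageChecked(page);
                    return 1;
            }

            ret = fscache_read_or_alloc_page(nfs_i_fscache(inode), page,
                                             nfs_readpage_from_fscache_complete,
                                             ctx, GFP_KERNEL);
            /* 0: cache read submitted; anything else: read from the server */
            return ret == 0 ? 0 : 1;
    }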

Link: https://marc.info/?l=linux-nfs&m=162498209518739
Fixes: 1e83b173b266 ("NFS: Add nfs_pageio_complete_read() and remove nfs_readpage_async()")
Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
fs/nfs/fscache.c
fs/nfs/read.c

diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index c4c021c6ebbd8bc0396f00257ac421045ab541fa..d743629e05e1248a05837a92ba1c2468a63d204e 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -385,12 +385,15 @@ static void nfs_readpage_from_fscache_complete(struct page *page,
                 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
                 page, context, error);
 
-       /* if the read completes with an error, we just unlock the page and let
-        * the VM reissue the readpage */
-       if (!error) {
+       /*
+        * If the read completes with an error, mark the page with PG_checked,
+        * unlock the page, and let the VM reissue the readpage.
+        */
+       if (!error)
                SetPageUptodate(page);
-               unlock_page(page);
-       }
+       else
+               SetPageChecked(page);
+       unlock_page(page);
 }
 
 /*
@@ -405,6 +408,11 @@ int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
                 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
                 nfs_i_fscache(inode), page, page->index, page->flags, inode);
 
+       if (PageChecked(page)) {
+               ClearPageChecked(page);
+               return 1;
+       }
+
        ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
                                         page,
                                         nfs_readpage_from_fscache_complete,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index eb390eb618b30b3829ccc8cb921cf183d57b17a6..9f39e0a1a38bd17de21ef5a675f3c4cae8a1df05 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -362,13 +362,13 @@ int nfs_readpage(struct file *file, struct page *page)
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
 
+       xchg(&desc.ctx->error, 0);
        if (!IS_SYNC(inode)) {
                ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
                if (ret == 0)
-                       goto out;
+                       goto out_wait;
        }
 
-       xchg(&desc.ctx->error, 0);
        nfs_pageio_init_read(&desc.pgio, inode, false,
                             &nfs_async_read_completion_ops);
 
@@ -378,6 +378,7 @@ int nfs_readpage(struct file *file, struct page *page)
 
        nfs_pageio_complete_read(&desc.pgio);
        ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
+out_wait:
        if (!ret) {
                ret = wait_on_page_locked_killable(page);
                if (!PageUptodate(page) && !ret)