nfsd: don't free files unconditionally in __nfsd_file_cache_purge
author      Jeff Layton <jlayton@kernel.org>
            Fri, 20 Jan 2023 19:52:14 +0000 (14:52 -0500)
committer   Chuck Lever <chuck.lever@oracle.com>
            Mon, 23 Jan 2023 14:51:17 +0000 (09:51 -0500)
nfsd_file_cache_purge is called when the server is shutting down, in
which case tearing everything down is generally fine, but it also gets
called when the exports cache is flushed.

Instead of walking the cache and freeing everything unconditionally,
handle it the same as when we have a notification of conflicting access.

Fixes: ac3a2585f018 ("nfsd: rework refcounting in filecache")
Reported-by: Ruben Vestergaard <rubenv@drcmr.dk>
Reported-by: Torkil Svensgaard <torkil@drcmr.dk>
Reported-by: Shachar Kagan <skagan@nvidia.com>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Tested-by: Shachar Kagan <skagan@nvidia.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
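
As an illustration of the approach described in the message above, here is a
minimal userspace model of the conditional unhash-and-queue pattern that the
new nfsd_file_cond_queue() helper in the diff below implements. The struct
fields, C11 atomics, and singly linked dispose list are stand-ins for the
kernel's nfsd_file, refcount_t and list_head; this is a sketch of the idea,
not the kernel code itself.

/*
 * Userspace model of the conditional dispose pattern (illustrative only):
 * unhash the object, try to take a reference, drop the LRU's reference if
 * it held one, and queue the object for disposal only when the combined
 * put turns out to drop the last reference.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct file_obj {
        atomic_int refcount;       /* stand-in for nf->nf_ref */
        bool hashed;               /* stand-in for the HASHED flag */
        bool on_lru;               /* stand-in for LRU membership */
        struct file_obj *next;     /* stand-in for the nf_lru linkage */
};

/* Queue @f on @dispose only if we end up putting the last references. */
static void cond_queue(struct file_obj *f, struct file_obj **dispose)
{
        int decrement = 1;

        if (!f->hashed)                         /* raced with another unhash */
                return;
        f->hashed = false;

        if (atomic_load(&f->refcount) == 0)     /* model of "can't get a ref" */
                return;
        atomic_fetch_add(&f->refcount, 1);

        if (f->on_lru) {                        /* the LRU held its own reference */
                f->on_lru = false;
                decrement++;
        }

        /* model of refcount_sub_and_test(): previous == decrement means now 0 */
        if (atomic_fetch_sub(&f->refcount, decrement) == decrement) {
                f->next = *dispose;
                *dispose = f;
        }
}

int main(void)
{
        struct file_obj idle = { .hashed = true, .on_lru = true };
        struct file_obj busy = { .hashed = true, .on_lru = true };
        struct file_obj *dispose = NULL;

        atomic_init(&idle.refcount, 1);  /* model: only the LRU holds a reference */
        atomic_init(&busy.refcount, 2);  /* model: another user still holds a reference */

        cond_queue(&idle, &dispose);
        cond_queue(&busy, &dispose);

        printf("idle file queued for dispose: %s\n", dispose == &idle ? "yes" : "no");
        printf("busy file queued for dispose: %s\n", dispose == &busy ? "yes" : "no");
        return 0;
}

The idle file ends up on the dispose list; the busy one is only unhashed and
is left for its remaining holder to release.
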
fs/nfsd/filecache.c

index 0ef0703..c0950ed 100644
@@ -662,6 +662,39 @@ static struct shrinker     nfsd_file_shrinker = {
 };
 
 /**
+ * nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
+ * @nf: nfsd_file to attempt to queue
+ * @dispose: private list to queue successfully-put objects
+ *
+ * Unhash an nfsd_file, try to get a reference to it, and then put that
+ * reference. If it's the last reference, queue it to the dispose list.
+ */
+static void
+nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
+       __must_hold(RCU)
+{
+       int decrement = 1;
+
+       /* If we raced with someone else unhashing, ignore it */
+       if (!nfsd_file_unhash(nf))
+               return;
+
+       /* If we can't get a reference, ignore it */
+       if (!nfsd_file_get(nf))
+               return;
+
+       /* Extra decrement if we remove from the LRU */
+       if (nfsd_file_lru_remove(nf))
+               ++decrement;
+
+       /* If refcount goes to 0, then put on the dispose list */
+       if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
+               list_add(&nf->nf_lru, dispose);
+               trace_nfsd_file_closing(nf);
+       }
+}
+
+/**
  * nfsd_file_queue_for_close: try to close out any open nfsd_files for an inode
  * @inode:   inode on which to close out nfsd_files
  * @dispose: list on which to gather nfsd_files to close out
@@ -688,30 +721,11 @@ nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
 
        rcu_read_lock();
        do {
-               int decrement = 1;
-
                nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
                                       nfsd_file_rhash_params);
                if (!nf)
                        break;
-
-               /* If we raced with someone else unhashing, ignore it */
-               if (!nfsd_file_unhash(nf))
-                       continue;
-
-               /* If we can't get a reference, ignore it */
-               if (!nfsd_file_get(nf))
-                       continue;
-
-               /* Extra decrement if we remove from the LRU */
-               if (nfsd_file_lru_remove(nf))
-                       ++decrement;
-
-               /* If refcount goes to 0, then put on the dispose list */
-               if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
-                       list_add(&nf->nf_lru, dispose);
-                       trace_nfsd_file_closing(nf);
-               }
+               nfsd_file_cond_queue(nf, dispose);
        } while (1);
        rcu_read_unlock();
 }
@@ -928,11 +942,8 @@ __nfsd_file_cache_purge(struct net *net)
 
                nf = rhashtable_walk_next(&iter);
                while (!IS_ERR_OR_NULL(nf)) {
-                       if (!net || nf->nf_net == net) {
-                               nfsd_file_unhash(nf);
-                               nfsd_file_lru_remove(nf);
-                               list_add(&nf->nf_lru, &dispose);
-                       }
+                       if (!net || nf->nf_net == net)
+                               nfsd_file_cond_queue(nf, &dispose);
                        nf = rhashtable_walk_next(&iter);
                }