NFSD: Refactor nfsd_reply_cache_free_locked()
authorChuck Lever <chuck.lever@oracle.com>
Sun, 9 Jul 2023 15:45:16 +0000 (11:45 -0400)
committerChuck Lever <chuck.lever@oracle.com>
Tue, 29 Aug 2023 21:45:22 +0000 (17:45 -0400)
To reduce contention on the bucket locks, we must avoid calling
kfree() while each bucket lock is held.

Start by refactoring nfsd_reply_cache_free_locked() into a helper
that removes an entry from the bucket (and must therefore run under
the lock) and a second helper that frees the entry (which does not
need to hold the lock).

For readability, rename the helpers to nfsd_cacherep_<verb>.

Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
fs/nfsd/nfscache.c

index a8eda1c..3fde7f6 100644 (file)
@@ -110,21 +110,33 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
        return rp;
 }
 
+static void nfsd_cacherep_free(struct svc_cacherep *rp)
+{
+       if (rp->c_type == RC_REPLBUFF)
+               kfree(rp->c_replvec.iov_base);
+       kmem_cache_free(drc_slab, rp);
+}
+
 static void
-nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
-                               struct nfsd_net *nn)
+nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
+                           struct svc_cacherep *rp)
 {
-       if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
+       if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
                nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
-               kfree(rp->c_replvec.iov_base);
-       }
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
        }
-       kmem_cache_free(drc_slab, rp);
+}
+
+static void
+nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
+                               struct nfsd_net *nn)
+{
+       nfsd_cacherep_unlink_locked(nn, b, rp);
+       nfsd_cacherep_free(rp);
 }
 
 static void
@@ -132,8 +144,9 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                        struct nfsd_net *nn)
 {
        spin_lock(&b->cache_lock);
-       nfsd_reply_cache_free_locked(b, rp, nn);
+       nfsd_cacherep_unlink_locked(nn, b, rp);
        spin_unlock(&b->cache_lock);
+       nfsd_cacherep_free(rp);
 }
 
 int nfsd_drc_slab_create(void)