svcrdma: Convert rdma_stat_sq_starve to a per-CPU counter
authorChuck Lever <chuck.lever@oracle.com>
Tue, 29 Dec 2020 20:55:17 +0000 (15:55 -0500)
committerChuck Lever <chuck.lever@oracle.com>
Mon, 25 Jan 2021 14:36:28 +0000 (09:36 -0500)
Avoid the overhead of a memory bus lock cycle for counting a value
that is hardly ever used.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma.c
net/sunrpc/xprtrdma/svc_rdma_rw.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c

index ff32c59..c06b16c 100644 (file)
@@ -69,7 +69,7 @@ extern unsigned int svcrdma_max_req_size;
 extern struct percpu_counter svcrdma_stat_recv;
 extern atomic_t rdma_stat_read;
 extern atomic_t rdma_stat_write;
-extern atomic_t rdma_stat_sq_starve;
+extern struct percpu_counter svcrdma_stat_sq_starve;
 extern atomic_t rdma_stat_rq_starve;
 extern atomic_t rdma_stat_rq_poll;
 extern atomic_t rdma_stat_rq_prod;
index 3e5e622..ee768d4 100644 (file)
@@ -66,7 +66,7 @@ static unsigned int max_max_inline = RPCRDMA_MAX_INLINE_THRESH;
 struct percpu_counter svcrdma_stat_recv;
 atomic_t rdma_stat_read;
 atomic_t rdma_stat_write;
-atomic_t rdma_stat_sq_starve;
+struct percpu_counter svcrdma_stat_sq_starve;
 atomic_t rdma_stat_rq_starve;
 atomic_t rdma_stat_rq_poll;
 atomic_t rdma_stat_rq_prod;
@@ -199,10 +199,10 @@ static struct ctl_table svcrdma_parm_table[] = {
        },
        {
                .procname       = "rdma_stat_sq_starve",
-               .data           = &rdma_stat_sq_starve,
-               .maxlen         = sizeof(atomic_t),
+               .data           = &svcrdma_stat_sq_starve,
+               .maxlen         = SVCRDMA_COUNTER_BUFSIZ,
                .mode           = 0644,
-               .proc_handler   = read_reset_stat,
+               .proc_handler   = svcrdma_counter_handler,
        },
        {
                .procname       = "rdma_stat_rq_starve",
@@ -267,6 +267,7 @@ static void svc_rdma_proc_cleanup(void)
        unregister_sysctl_table(svcrdma_table_header);
        svcrdma_table_header = NULL;
 
+       percpu_counter_destroy(&svcrdma_stat_sq_starve);
        percpu_counter_destroy(&svcrdma_stat_recv);
 }
 
@@ -280,11 +281,15 @@ static int svc_rdma_proc_init(void)
        rc = percpu_counter_init(&svcrdma_stat_recv, 0, GFP_KERNEL);
        if (rc)
                goto out_err;
+       rc = percpu_counter_init(&svcrdma_stat_sq_starve, 0, GFP_KERNEL);
+       if (rc)
+               goto out_err;
 
        svcrdma_table_header = register_sysctl_table(svcrdma_root_table);
        return 0;
 
 out_err:
+       percpu_counter_destroy(&svcrdma_stat_recv);
        return rc;
 }
 
index 0b63e13..d7d98b2 100644 (file)
@@ -364,6 +364,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
                        return 0;
                }
 
+               percpu_counter_inc(&svcrdma_stat_sq_starve);
                trace_svcrdma_sq_full(rdma);
                atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
                wait_event(rdma->sc_send_wait,
index 68af79d..52c759a 100644 (file)
@@ -317,7 +317,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
                if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
-                       atomic_inc(&rdma_stat_sq_starve);
+                       percpu_counter_inc(&svcrdma_stat_sq_starve);
                        trace_svcrdma_sq_full(rdma);
                        atomic_inc(&rdma->sc_sq_avail);
                        wait_event(rdma->sc_send_wait,