svcrdma: Clean up dto_q critical section in svc_rdma_recvfrom()
authorChuck Lever <chuck.lever@oracle.com>
Mon, 1 Mar 2021 15:44:49 +0000 (10:44 -0500)
committerChuck Lever <chuck.lever@oracle.com>
Wed, 31 Mar 2021 19:58:48 +0000 (15:58 -0400)
To me, this reads as less cluttered and less redundant. I had hoped
it would also reduce contention on the dto_q lock by shrinking the
critical section, but alas, the only improvement is readability.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c

index 232860e..6be23ce 100644 (file)
@@ -794,22 +794,22 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 
        rqstp->rq_xprt_ctxt = NULL;
 
+       ctxt = NULL;
        spin_lock(&rdma_xprt->sc_rq_dto_lock);
        ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
-       if (!ctxt) {
+       if (ctxt)
+               list_del(&ctxt->rc_list);
+       else
                /* No new incoming requests, terminate the loop */
                clear_bit(XPT_DATA, &xprt->xpt_flags);
-               spin_unlock(&rdma_xprt->sc_rq_dto_lock);
-               svc_xprt_received(xprt);
-               return 0;
-       }
-       list_del(&ctxt->rc_list);
        spin_unlock(&rdma_xprt->sc_rq_dto_lock);
-       percpu_counter_inc(&svcrdma_stat_recv);
 
        /* Unblock the transport for the next receive */
        svc_xprt_received(xprt);
+       if (!ctxt)
+               return 0;
 
+       percpu_counter_inc(&svcrdma_stat_recv);
        ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
                                   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
                                   DMA_FROM_DEVICE);