svcrdma: Allocate recv_ctxt's on CPU handling Receives
author Chuck Lever <chuck.lever@oracle.com>
Mon, 7 May 2018 19:27:48 +0000 (15:27 -0400)
committer J. Bruce Fields <bfields@redhat.com>
Fri, 11 May 2018 19:48:57 +0000 (15:48 -0400)
There is a significant latency penalty when processing an ingress
Receive if the Receive buffer resides in memory that is not on the
same NUMA node as the CPU handling completions for a CQ.

The system administrator and the device driver determine which CPU
handles completions. This CPU does not change during the life of
the CQ. Further, the Upper Layer has no visibility into which CPU
that is.
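
The only knob the Upper Layer has is the completion vector index it
passes at CQ creation time. Below is a minimal sketch of that call,
using the verbs API svcrdma already depends on; the vector choice and
the helper name are illustrative assumptions, not code from this
patch:

    #include <linux/sunrpc/svc_rdma.h>
    #include <rdma/ib_verbs.h>

    /* The ULP picks only a comp_vector index here. Which CPU
     * services that vector is fixed by the driver's interrupt
     * setup and the administrator's IRQ affinity settings, and
     * is never reported back to the ULP.
     */
    static struct ib_cq *example_alloc_cq(struct svcxprt_rdma *rdma)
    {
            return ib_alloc_cq(rdma->sc_cm_id->device, rdma,
                               rdma->sc_sq_depth, 0 /* comp_vector */,
                               IB_POLL_WORKQUEUE);
    }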

Allocating Receive buffers in the Receive completion handler
guarantees that Receive buffers are allocated on the preferred NUMA
node for that CQ.
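
This works because kmalloc() with no explicit NUMA node allocates
from the node local to the CPU it runs on. A minimal sketch of the
mechanism, not the patch itself (the handler name and buffer size
are illustrative):

    #include <linux/slab.h>
    #include <rdma/ib_verbs.h>

    static void example_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
    {
            void *buffer;

            /* This handler always runs on the CPU servicing the CQ,
             * so the allocation below is served from that CPU's
             * local NUMA node. GFP_KERNEL is safe because svcrdma
             * polls its CQs from a workqueue (process context).
             */
            buffer = kmalloc(4096, GFP_KERNEL);
            if (!buffer)
                    return;

            /* ... DMA-map the buffer and post a fresh Receive ... */
            kfree(buffer);  /* keep the sketch self-contained */
    }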

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c

diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 01baabfb863b4437f0d5c02abfd708d9f9cbe937..27cf59c7085f14157e8d1e4322f003aff6f77a8d 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -151,6 +151,7 @@ struct svc_rdma_recv_ctxt {
        struct ib_sge           rc_recv_sge;
        void                    *rc_recv_buf;
        struct xdr_buf          rc_arg;
+       bool                    rc_temp;
        u32                     rc_byte_len;
        unsigned int            rc_page_count;
        unsigned int            rc_hdr_count;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index d4ccd1c0142cb6984fd582fb341147c4a539900d..0445e75d76a254e282e766b9b4e97a1f21433736 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -144,6 +144,7 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
        ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
        ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
        ctxt->rc_recv_buf = buffer;
+       ctxt->rc_temp = false;
        return ctxt;
 
 fail2:
@@ -154,6 +155,15 @@ fail0:
        return NULL;
 }
 
+static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
+                                      struct svc_rdma_recv_ctxt *ctxt)
+{
+       ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
+                           ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
+       kfree(ctxt->rc_recv_buf);
+       kfree(ctxt);
+}
+
 /**
  * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
  * @rdma: svcxprt_rdma being torn down
@@ -165,12 +175,7 @@ void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
 
        while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
                list_del(&ctxt->rc_list);
-               ib_dma_unmap_single(rdma->sc_pd->device,
-                                   ctxt->rc_recv_sge.addr,
-                                   ctxt->rc_recv_sge.length,
-                                   DMA_FROM_DEVICE);
-               kfree(ctxt->rc_recv_buf);
-               kfree(ctxt);
+               svc_rdma_recv_ctxt_destroy(rdma, ctxt);
        }
 }
 
@@ -212,21 +217,21 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 
        for (i = 0; i < ctxt->rc_page_count; i++)
                put_page(ctxt->rc_pages[i]);
-       spin_lock(&rdma->sc_recv_lock);
-       list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
-       spin_unlock(&rdma->sc_recv_lock);
+
+       if (!ctxt->rc_temp) {
+               spin_lock(&rdma->sc_recv_lock);
+               list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
+               spin_unlock(&rdma->sc_recv_lock);
+       } else
+               svc_rdma_recv_ctxt_destroy(rdma, ctxt);
 }
 
-static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
+static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
+                               struct svc_rdma_recv_ctxt *ctxt)
 {
-       struct svc_rdma_recv_ctxt *ctxt;
        struct ib_recv_wr *bad_recv_wr;
        int ret;
 
-       ctxt = svc_rdma_recv_ctxt_get(rdma);
-       if (!ctxt)
-               return -ENOMEM;
-
        svc_xprt_get(&rdma->sc_xprt);
        ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr);
        trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
@@ -240,6 +245,16 @@ err_post:
        return ret;
 }
 
+static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
+{
+       struct svc_rdma_recv_ctxt *ctxt;
+
+       ctxt = svc_rdma_recv_ctxt_get(rdma);
+       if (!ctxt)
+               return -ENOMEM;
+       return __svc_rdma_post_recv(rdma, ctxt);
+}
+
 /**
  * svc_rdma_post_recvs - Post initial set of Recv WRs
  * @rdma: fresh svcxprt_rdma
@@ -248,11 +263,16 @@ err_post:
  */
 bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
 {
+       struct svc_rdma_recv_ctxt *ctxt;
        unsigned int i;
        int ret;
 
        for (i = 0; i < rdma->sc_max_requests; i++) {
-               ret = svc_rdma_post_recv(rdma);
+               ctxt = svc_rdma_recv_ctxt_get(rdma);
+               if (!ctxt)
+                       return false;
+               ctxt->rc_temp = true;
+               ret = __svc_rdma_post_recv(rdma, ctxt);
                if (ret) {
                        pr_err("svcrdma: failure posting recv buffers: %d\n",
                               ret);