SUNRPC: Set TCP_CORK until the transmit queue is empty
author     Trond Myklebust <trond.myklebust@hammerspace.com>
           Tue, 9 Feb 2021 21:04:15 +0000 (16:04 -0500)
committer  Trond Myklebust <trond.myklebust@hammerspace.com>
           Mon, 5 Apr 2021 13:04:20 +0000 (09:04 -0400)
When we have multiple RPC requests queued up, it makes sense to set the
TCP_CORK option while the transmit queue is non-empty: holding back partial
frames lets the queued requests be coalesced into fewer, full-sized TCP
segments.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
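
For context, the hunks below apply the classic TCP_CORK pattern: keep the
socket corked while more data is known to be queued, and drop the cork once
the last queued message has been handed to the socket. The following
userspace sketch is only an illustration of that pattern, not the kernel
code changed here; it assumes an already-connected TCP socket and a
caller-provided message queue.

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <stddef.h>
	#include <sys/socket.h>

	struct pending_msg {
		const void *buf;
		size_t len;
	};

	/* Illustration only: cork while more than one message is still
	 * queued so small writes coalesce into full-sized segments, then
	 * uncork so the final (possibly partial) frame goes out at once. */
	static void send_queued(int fd, const struct pending_msg *q, size_t nr_queued)
	{
		int on = 1, off = 0;

		while (nr_queued) {
			setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
			send(fd, q->buf, q->len, 0);
			if (nr_queued == 1)	/* last queued message */
				setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
			q++;
			nr_queued--;
		}
	}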
include/linux/sunrpc/xprt.h
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c

diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index d2e97ee..d81fe8b 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -247,6 +247,7 @@ struct rpc_xprt {
        struct rpc_task *       snd_task;       /* Task blocked in send */
 
        struct list_head        xmit_queue;     /* Send queue */
+       atomic_long_t           xmit_queuelen;
 
        struct svc_xprt         *bc_xprt;       /* NFSv4.1 backchannel */
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 691ccf8..a853f75 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1352,6 +1352,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
                list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
                INIT_LIST_HEAD(&req->rq_xmit2);
 out:
+               atomic_long_inc(&xprt->xmit_queuelen);
                set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
                spin_unlock(&xprt->queue_lock);
        }
@@ -1381,6 +1382,7 @@ xprt_request_dequeue_transmit_locked(struct rpc_task *task)
                }
        } else
                list_del(&req->rq_xmit2);
+       atomic_long_dec(&req->rq_xprt->xmit_queuelen);
 }
 
 /**
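
Note on the new counter before the xprtsock.c hunks: xmit_queuelen is updated
under xprt->queue_lock in the enqueue/dequeue paths above, but the socket send
path only ever reads it, so an atomic_long_t keeps that read lock-free. Since
(as I read these paths) a request leaves xmit_queue only after the transport's
send routine returns, the count observed while transmitting still includes the
request being sent; a value of 1 therefore means no other request is waiting,
which is when the cork is released below.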
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e35760f..a64f5ed 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1018,6 +1018,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
         * to cope with writespace callbacks arriving _after_ we have
         * called sendmsg(). */
        req->rq_xtime = ktime_get();
+       tcp_sock_set_cork(transport->inet, true);
        while (1) {
                status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
                                           transport->xmit.offset, rm, &sent);
@@ -1032,6 +1033,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
                if (likely(req->rq_bytes_sent >= msglen)) {
                        req->rq_xmit_bytes_sent += transport->xmit.offset;
                        transport->xmit.offset = 0;
+                       if (atomic_long_read(&xprt->xmit_queuelen) == 1)
+                               tcp_sock_set_cork(transport->inet, false);
                        return 0;
                }
 
@@ -2163,6 +2166,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                }
 
                xs_tcp_set_socket_timeouts(xprt, sock);
+               tcp_sock_set_nodelay(sk);
 
                write_lock_bh(&sk->sk_callback_lock);
 
@@ -2177,7 +2181,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 
                /* socket options */
                sock_reset_flag(sk, SOCK_LINGER);
-               tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
                xprt_clear_connected(xprt);
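
The last two hunks replace the open-coded "tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF"
with the tcp_sock_set_nodelay() helper at connect time. TCP_NODELAY and
TCP_CORK can be combined on Linux, so Nagle stays disabled and data held back
by the cork is pushed out as soon as the cork is cleared rather than waiting
behind unacknowledged segments. A userspace analogue of the connect-time
setting, for illustration only:

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	/* Disable Nagle once at connection setup; per-send corking (as in
	 * the sketch above) then decides when partial frames go out. */
	static int disable_nagle(int fd)
	{
		int on = 1;

		return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	}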