net/smc: handle sockopt TCP_DEFER_ACCEPT
author    Ursula Braun <ubraun@linux.ibm.com>
Thu, 26 Apr 2018 15:18:23 +0000 (17:18 +0200)
committer David S. Miller <davem@davemloft.net>
Fri, 27 Apr 2018 18:02:52 +0000 (14:02 -0400)
If the sockopt TCP_DEFER_ACCEPT is set, accept() is delayed until data
is available on the new connection.

Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
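
For illustration only (not part of this patch), a minimal userspace sketch of
how a server might request deferred accept on an SMC socket. The AF_SMC and
SMCPROTO_SMC fallback defines and the port number are assumptions made for the
example; the option value is in seconds, matching the
msecs_to_jiffies(val * MSEC_PER_SEC) conversion in the patch.

/* Usage sketch (illustrative, not part of the patch). */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_SMC
#define AF_SMC 43               /* address family of SMC sockets */
#endif
#ifndef SMCPROTO_SMC
#define SMCPROTO_SMC 0          /* SMC protocol variant for IPv4 peers */
#endif

int main(void)
{
        int defer_secs = 5;     /* wait up to ~5s for data before accept() returns */
        struct sockaddr_in addr;
        int lfd, cfd;

        lfd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
        if (lfd < 0) {
                perror("socket");
                return 1;
        }

        /* handled by smc_setsockopt() after this patch */
        if (setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
                       &defer_secs, sizeof(defer_secs)) < 0)
                perror("setsockopt(TCP_DEFER_ACCEPT)");

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(12345);   /* example port */

        if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(lfd, 128) < 0) {
                perror("bind/listen");
                close(lfd);
                return 1;
        }

        /* blocks until the peer has sent data or the timeout expired */
        cfd = accept(lfd, NULL, NULL);
        if (cfd >= 0)
                close(cfd);
        close(lfd);
        return 0;
}
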
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_rx.c
net/smc/smc_rx.h

diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 9d8b381..20aa417 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1044,6 +1044,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 
        if (lsmc->sk.sk_state != SMC_LISTEN) {
                rc = -EINVAL;
+               release_sock(sk);
                goto out;
        }
 
@@ -1071,9 +1072,29 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 
        if (!rc)
                rc = sock_error(nsk);
+       release_sock(sk);
+       if (rc)
+               goto out;
+
+       if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
+               /* wait till data arrives on the socket */
+               timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
+                                                               MSEC_PER_SEC);
+               if (smc_sk(nsk)->use_fallback) {
+                       struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
+
+                       lock_sock(clcsk);
+                       if (skb_queue_empty(&clcsk->sk_receive_queue))
+                               sk_wait_data(clcsk, &timeo, NULL);
+                       release_sock(clcsk);
+               } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
+                       lock_sock(nsk);
+                       smc_rx_wait_data(smc_sk(nsk), &timeo);
+                       release_sock(nsk);
+               }
+       }
 
 out:
-       release_sock(sk);
        sock_put(sk); /* sock_hold above */
        return rc;
 }
@@ -1340,6 +1361,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
                                                 0);
                }
                break;
+       case TCP_DEFER_ACCEPT:
+               smc->sockopt_defer_accept = val;
+               break;
        default:
                break;
        }
diff --git a/net/smc/smc.h b/net/smc/smc.h
index e4829a2..2405e88 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -180,6 +180,10 @@ struct smc_sock {                          /* smc sock container */
        struct list_head        accept_q;       /* sockets to be accepted */
        spinlock_t              accept_q_lock;  /* protects accept_q */
        bool                    use_fallback;   /* fallback to tcp */
+       int                     sockopt_defer_accept;
+                                               /* sockopt TCP_DEFER_ACCEPT
+                                                * value
+                                                */
        u8                      wait_close_tx_prepared : 1;
                                                /* shutdown wr or close
                                                 * started, waiting for unsent
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index eff4e0d..af851d8 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -51,7 +51,7 @@ static void smc_rx_data_ready(struct sock *sk)
  * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
  * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
  */
-static int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
+int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
 {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct smc_connection *conn = &smc->conn;
diff --git a/net/smc/smc_rx.h b/net/smc/smc_rx.h
index 3a32b59..0b75a6b 100644
--- a/net/smc/smc_rx.h
+++ b/net/smc/smc_rx.h
@@ -20,5 +20,6 @@
 void smc_rx_init(struct smc_sock *smc);
 int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
                   int flags);
+int smc_rx_wait_data(struct smc_sock *smc, long *timeo);
 
 #endif /* SMC_RX_H */