net/tls: split tls_rx_reader_lock
author     Hannes Reinecke <hare@suse.de>
           Wed, 26 Jul 2023 19:15:55 +0000 (21:15 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 25 Oct 2023 10:03:12 +0000 (12:03 +0200)
[ Upstream commit f9ae3204fb45d0749befc1cdff50f691c7461e5a ]

Split tls_rx_reader_{lock,unlock} into an 'acquire/release' part and
the actual socket locking part.
With that, the TLS RX reader lock can also be taken in situations
where the socket is already locked.
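
For illustration, a caller that already holds the socket lock can now
pair the split helpers directly instead of calling tls_rx_reader_lock()
(which would take the socket lock a second time). A minimal sketch;
tls_locked_recv() is a hypothetical caller, not part of this patch:

  /* Hypothetical caller: lock_sock(sk) has already been done by the
   * caller's framework, so only the reader serialization provided by
   * ctx->reader_present is still needed here.
   */
  static int tls_locked_recv(struct sock *sk, struct tls_sw_context_rx *ctx)
  {
          int err;

          err = tls_rx_reader_acquire(sk, ctx, false);
          if (err)
                  return err;

          /* ... receive work, serialized against other readers ... */

          tls_rx_reader_release(sk, ctx);
          return 0;
  }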

Suggested-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Link: https://lore.kernel.org/r/20230726191556.41714-6-hare@suse.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Stable-dep-of: 419ce133ab92 ("tcp: allow again tcp_disconnect() when threads are waiting")
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 9be00eb..c5c8fda 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1851,13 +1851,10 @@ tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
        return sk_flush_backlog(sk);
 }
 
-static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
-                             bool nonblock)
+static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
+                                bool nonblock)
 {
        long timeo;
-       int err;
-
-       lock_sock(sk);
 
        timeo = sock_rcvtimeo(sk, nonblock);
 
@@ -1871,26 +1868,30 @@ static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
                              !READ_ONCE(ctx->reader_present), &wait);
                remove_wait_queue(&ctx->wq, &wait);
 
-               if (timeo <= 0) {
-                       err = -EAGAIN;
-                       goto err_unlock;
-               }
-               if (signal_pending(current)) {
-                       err = sock_intr_errno(timeo);
-                       goto err_unlock;
-               }
+               if (timeo <= 0)
+                       return -EAGAIN;
+               if (signal_pending(current))
+                       return sock_intr_errno(timeo);
        }
 
        WRITE_ONCE(ctx->reader_present, 1);
 
        return 0;
+}
 
-err_unlock:
-       release_sock(sk);
+static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+                             bool nonblock)
+{
+       int err;
+
+       lock_sock(sk);
+       err = tls_rx_reader_acquire(sk, ctx, nonblock);
+       if (err)
+               release_sock(sk);
        return err;
 }
 
-static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
 {
        if (unlikely(ctx->reader_contended)) {
                if (wq_has_sleeper(&ctx->wq))
@@ -1902,6 +1903,11 @@ static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
        }
 
        WRITE_ONCE(ctx->reader_present, 0);
+}
+
+static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+{
+       tls_rx_reader_release(sk, ctx);
        release_sock(sk);
 }