rxrpc: Fix potential deadlock
author    David Howells <dhowells@redhat.com>
          Tue, 30 Jul 2019 13:42:50 +0000 (14:42 +0100)
committer David Howells <dhowells@redhat.com>
          Tue, 30 Jul 2019 13:42:50 +0000 (14:42 +0100)
There is a potential deadlock in rxrpc_peer_keepalive_dispatch(), whereby
rxrpc_put_peer() is called with the peer_hash_lock held.  If the call
reduces the peer's refcount to 0, rxrpc_put_peer() calls
__rxrpc_put_peer() - which then tries to take the already-held lock.
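
The problem path, sketched from the patch below (a simplified
illustration, not verbatim code; unrelated logic is omitted and the
"..." elisions are mine):

	static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet, ...)
	{
		...
		spin_lock_bh(&rxnet->peer_hash_lock);
		list_add_tail(&peer->keepalive_link,
			      &rxnet->peer_keepalive[slot & mask]);
		rxrpc_put_peer(peer);	/* If this drops the last ref,
					 * __rxrpc_put_peer() runs and takes
					 * rxnet->peer_hash_lock again:
					 * self-deadlock.
					 */
		spin_unlock_bh(&rxnet->peer_hash_lock);
	}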

Fix this by providing a version of rxrpc_put_peer() that can be called in
situations where the lock is already held.

The bug may produce the following lockdep report:

============================================
WARNING: possible recursive locking detected
5.2.0-next-20190718 #41 Not tainted
--------------------------------------------
kworker/0:3/21678 is trying to acquire lock:
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: spin_lock_bh
/./include/linux/spinlock.h:343 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at:
__rxrpc_put_peer /net/rxrpc/peer_object.c:415 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at:
rxrpc_put_peer+0x2d3/0x6a0 /net/rxrpc/peer_object.c:435

but task is already holding lock:
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: spin_lock_bh
/./include/linux/spinlock.h:343 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at:
rxrpc_peer_keepalive_dispatch /net/rxrpc/peer_event.c:378 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at:
rxrpc_peer_keepalive_worker+0x6b3/0xd02 /net/rxrpc/peer_event.c:430

Fixes: 330bdcfadcee ("rxrpc: Fix the keepalive generator [ver #2]")
Reported-by: syzbot+72af434e4b3417318f84@syzkaller.appspotmail.com
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
net/rxrpc/ar-internal.h
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c

diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 80335b4..822f453 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1061,6 +1061,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
+void rxrpc_put_peer_locked(struct rxrpc_peer *);
 
 /*
  * proc.c
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 9f2f45c..7666ec7 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
                spin_lock_bh(&rxnet->peer_hash_lock);
                list_add_tail(&peer->keepalive_link,
                              &rxnet->peer_keepalive[slot & mask]);
-               rxrpc_put_peer(peer);
+               rxrpc_put_peer_locked(peer);
        }
 
        spin_unlock_bh(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 9d3ce81..9c3ac96 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -437,6 +437,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 }
 
 /*
+ * Drop a ref on a peer record where the caller already holds the
+ * peer_hash_lock.
+ */
+void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+{
+       const void *here = __builtin_return_address(0);
+       int n;
+
+       n = atomic_dec_return(&peer->usage);
+       trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+       if (n == 0) {
+               hash_del_rcu(&peer->hash_link);
+               list_del_init(&peer->keepalive_link);
+               kfree_rcu(peer, rcu);
+       }
+}
+
+/*
  * Make sure all peer records have been discarded.
  */
 void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
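
For contrast, the pre-existing unlocked put path - the one that
deadlocks when the caller already holds peer_hash_lock - looks roughly
like this.  This is a sketch reconstructed from the lockdep trace above
(it is not part of this patch, and it assumes the net pointer is reached
via peer->local->rxnet):

	static void __rxrpc_put_peer(struct rxrpc_peer *peer)
	{
		struct rxrpc_net *rxnet = peer->local->rxnet;

		/* Takes the lock itself - hence it must not be reached
		 * with peer_hash_lock already held.
		 */
		spin_lock_bh(&rxnet->peer_hash_lock);
		hash_del_rcu(&peer->hash_link);
		list_del_init(&peer->keepalive_link);
		spin_unlock_bh(&rxnet->peer_hash_lock);

		kfree_rcu(peer, rcu);
	}

The new rxrpc_put_peer_locked() performs the same hash_del_rcu() and
list_del_init() cleanup inline, relying on the lock the caller already
holds rather than taking it a second time.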