io_uring/af_unix: defer registered files gc to io_uring release
Author: Pavel Begunkov <asml.silence@gmail.com>
Sun, 16 Oct 2022 21:42:54 +0000 (22:42 +0100)
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 26 Oct 2022 10:35:52 +0000 (12:35 +0200)
[ upstream commit 0091bfc81741b8d3aeb3b7ab8636f911b2de6e80 ]

Instead of putting io_uring's registered files in unix_gc() we want it
to be done by io_uring itself. The trick here is to consider io_uring
registered files for cycle detection but not actually putting them down.
Because io_uring can't register other ring instances, this will remove
all refs to the ring file triggering the ->release path and clean up
with io_ring_ctx_free().

Cc: stable@vger.kernel.org
Fixes: 6b06314c47e1 ("io_uring: add file set registration")
Reported-and-tested-by: David Bouman <dbouman03@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
[axboe: add kerneldoc comment to skb, fold in skb leak fix]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/io_uring.c
include/linux/skbuff.h
net/unix/garbage.c

fs/io_uring.c: index af856a8..b94cbac 100644 (file)
@@ -8066,6 +8066,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
        }
 
        skb->sk = sk;
+       skb->scm_io_uring = 1;
 
        nr_files = 0;
        fpl->user = get_uid(current_user());
include/linux/skbuff.h: index cfb889f..19e595c 100644 (file)
@@ -725,6 +725,7 @@ typedef unsigned char *sk_buff_data_t;
  *     @csum_level: indicates the number of consecutive checksums found in
  *             the packet minus one that have been verified as
  *             CHECKSUM_UNNECESSARY (max 3)
+ *     @scm_io_uring: SKB holds io_uring registered files
  *     @dst_pending_confirm: need to confirm neighbour
  *     @decrypted: Decrypted SKB
  *     @slow_gro: state present at GRO time, slower prepare step required
@@ -910,6 +911,7 @@ struct sk_buff {
        __u8                    decrypted:1;
 #endif
        __u8                    slow_gro:1;
+       __u8                    scm_io_uring:1;
 
 #ifdef CONFIG_NET_SCHED
        __u16                   tc_index;       /* traffic control index */
net/unix/garbage.c: index d45d536..dc27635 100644 (file)
@@ -204,6 +204,7 @@ void wait_for_unix_gc(void)
 /* The external entry point: unix_gc() */
 void unix_gc(void)
 {
+       struct sk_buff *next_skb, *skb;
        struct unix_sock *u;
        struct unix_sock *next;
        struct sk_buff_head hitlist;
@@ -297,11 +298,30 @@ void unix_gc(void)
 
        spin_unlock(&unix_gc_lock);
 
+       /* We need io_uring to clean its registered files, ignore all io_uring
+        * originated skbs. It's fine as io_uring doesn't keep references to
+        * other io_uring instances and so killing all other files in the cycle
+        * will put all io_uring references forcing it to go through normal
+        * release path eventually putting registered files.
+        */
+       skb_queue_walk_safe(&hitlist, skb, next_skb) {
+               if (skb->scm_io_uring) {
+                       __skb_unlink(skb, &hitlist);
+                       skb_queue_tail(&skb->sk->sk_receive_queue, skb);
+               }
+       }
+
        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);
 
        spin_lock(&unix_gc_lock);
 
+       /* There could be io_uring registered files, just push them back to
+        * the inflight list
+        */
+       list_for_each_entry_safe(u, next, &gc_candidates, link)
+               list_move_tail(&u->link, &gc_inflight_list);
+
        /* All candidates should have been detached by now. */
        BUG_ON(!list_empty(&gc_candidates));