io_uring/fdinfo: include locked hash table in fdinfo output
author Jens Axboe <axboe@kernel.dk>
Tue, 10 Jan 2023 17:24:52 +0000 (10:24 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 18 Jan 2023 10:58:15 +0000 (11:58 +0100)
commit ea97cbebaf861d99c3e892275147e6fca6d2c1ca upstream.

A previous commit split the hash table for polled requests into two
parts, but didn't update the fdinfo output to match. That makes the
output less useful for debugging, as a request sitting in the locked
hash table won't be listed and may wrongly appear not to be pending
poll.

Fix this up by dumping the locked hash table contents too.
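
For illustration only (not part of the kernel change): a minimal
user-space sketch of the walk after this fix. The struct and field
names mirror what the diff below touches (cancel_table,
cancel_table_locked, hash_bits, hbs), but the request, bucket, and
list types are simplified stand-ins, locking is omitted, and the
opcode/task_works values are made up.

	/*
	 * Illustrative user-space model only, NOT kernel code.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	struct req {				/* stand-in for struct io_kiocb */
		int opcode;
		int task_works;
		struct req *next;		/* stand-in for the hash_node link */
	};

	struct bucket {				/* stand-in for struct io_hash_bucket */
		struct req *list;
	};

	struct hash_table {			/* stand-in for struct io_hash_table */
		struct bucket *hbs;
		unsigned int hash_bits;
	};

	/* Walk both tables, mirroring the fixed fdinfo loop in the diff below. */
	static void dump_poll_list(const struct hash_table *plain,
				   const struct hash_table *locked, bool has_lock)
	{
		printf("PollList:\n");
		for (unsigned int i = 0; i < (1U << plain->hash_bits); i++) {
			/* plain table: each bucket has its own spinlock in the kernel */
			for (const struct req *r = plain->hbs[i].list; r; r = r->next)
				printf("  op=%d, task_works=%d\n", r->opcode, r->task_works);
			/* locked table: only safe to walk while holding uring_lock */
			if (!has_lock)
				continue;
			for (const struct req *r = locked->hbs[i].list; r; r = r->next)
				printf("  op=%d, task_works=%d\n", r->opcode, r->task_works);
		}
	}

	int main(void)
	{
		/* hypothetical entries, one per table, two buckets each */
		struct req a = { .opcode = 6, .task_works = 0, .next = NULL };
		struct req b = { .opcode = 7, .task_works = 1, .next = NULL };
		struct bucket plain_hbs[2] = { { &a }, { NULL } };
		struct bucket locked_hbs[2] = { { NULL }, { &b } };
		struct hash_table plain = { plain_hbs, 1 };
		struct hash_table locked = { locked_hbs, 1 };

		dump_poll_list(&plain, &locked, true);
		return 0;
	}

This also shows why the mutex_unlock() moves below the loop in the
diff: the second walk is only valid while uring_lock is still held.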

Fixes: 9ca9fb24d5fe ("io_uring: mutex locked poll hashing")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index 2e04850..882bd56 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -170,12 +170,11 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                xa_for_each(&ctx->personalities, index, cred)
                        io_uring_show_cred(m, index, cred);
        }
-       if (has_lock)
-               mutex_unlock(&ctx->uring_lock);
 
        seq_puts(m, "PollList:\n");
        for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
                struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
+               struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
                struct io_kiocb *req;
 
                spin_lock(&hb->lock);
@@ -183,8 +182,17 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                        seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
                                        task_work_pending(req->task));
                spin_unlock(&hb->lock);
+
+               if (!has_lock)
+                       continue;
+               hlist_for_each_entry(req, &hbl->list, hash_node)
+                       seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
+                                       task_work_pending(req->task));
        }
 
+       if (has_lock)
+               mutex_unlock(&ctx->uring_lock);
+
        seq_puts(m, "CqOverflowList:\n");
        spin_lock(&ctx->completion_lock);
        list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {