/*
 * Look up a single armed poll request by its cancellation key (cd->data).
 * This hunk parameterizes the lookup with an explicit hash_table[] instead
 * of always using ctx->cancel_hash, so callers can search an alternative
 * table. NOTE(review): on a match the bucket appears to be handed back via
 * *out_bucket still locked — confirm against the (truncated) body below.
 */
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
struct io_cancel_data *cd,
+ struct io_hash_bucket hash_table[],
struct io_hash_bucket **out_bucket)
{
struct io_kiocb *req;
/* Hash the user-supplied cancel key into a bucket index. */
u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
- struct io_hash_bucket *hb = &ctx->cancel_hash[index];
+ struct io_hash_bucket *hb = &hash_table[index];
/* No bucket is reported to the caller until a match is found
 * (function body truncated in this view). */
*out_bucket = NULL;
/*
 * Linear scan over every hash bucket for a poll request matching @cd —
 * used when the cancel key can't be hashed directly (fd/any-style
 * cancellation). Hunk parameterizes which table is scanned via
 * hash_table[], mirroring the io_poll_find() change.
 */
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
struct io_cancel_data *cd,
+ struct io_hash_bucket hash_table[],
struct io_hash_bucket **out_bucket)
{
struct io_kiocb *req;
*out_bucket = NULL;
/* cancel_hash_bits is log2 of the table size; visit every bucket. */
for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
- struct io_hash_bucket *hb = &ctx->cancel_hash[i];
+ struct io_hash_bucket *hb = &hash_table[i];
/* Per-bucket lock serializes against concurrent hash insert/remove. */
spin_lock(&hb->lock);
hlist_for_each_entry(req, &hb->list, hash_node) {
/* (match test and unlock logic truncated in this view) */
return true;
}
-int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
+static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+ struct io_hash_bucket hash_table[])
{
struct io_hash_bucket *bucket;
struct io_kiocb *req;
if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
- req = io_poll_file_find(ctx, cd, &bucket);
+ req = io_poll_file_find(ctx, cd, ctx->cancel_hash, &bucket);
else
- req = io_poll_find(ctx, false, cd, &bucket);
+ req = io_poll_find(ctx, false, cd, ctx->cancel_hash, &bucket);
if (req)
io_poll_cancel_req(req);
return req ? 0 : -ENOENT;
}
+int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
+{
+ return __io_poll_cancel(ctx, cd, ctx->cancel_hash);
+}
+
/*
 * Presumably converts the SQE's user-supplied poll event mask/flags into
 * a kernel __poll_t — body not visible in this view; confirm upstream.
 */
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
unsigned int flags)
{
/* NOTE(review): the lines below belong to a different, unnamed function
 * (its definition is outside this view — ret2/locked/preq/cd/bucket are
 * declared there). */
int ret2, ret = 0;
bool locked;
/* Call-site hunk: the poll_only lookup now receives the default
 * ctx->cancel_hash table explicitly, matching io_poll_find()'s new
 * hash_table[] parameter. */
- preq = io_poll_find(ctx, true, &cd, &bucket);
+ preq = io_poll_find(ctx, true, &cd, ctx->cancel_hash, &bucket);
if (preq)
ret2 = io_poll_disarm(preq);
/* A non-NULL bucket means the finder returned it locked. */
if (bucket)