Currently, batch free handles freeing request memory and putting ctx refs
together. Separate the two and track them with different counters; this
will be needed for reusing request memory.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
struct req_batch {
void *reqs[IO_IOPOLL_BATCH];
int to_free;
+ int ctx_refs;
struct task_struct *task;
int task_refs;
{
rb->to_free = 0;
rb->task_refs = 0;
+ rb->ctx_refs = 0;
rb->task = NULL;
}
struct req_batch *rb)
{
kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
- percpu_ref_put_many(&ctx->refs, rb->to_free);
rb->to_free = 0;
}
io_put_task(rb->task, rb->task_refs);
rb->task = NULL;
}
+ if (rb->ctx_refs)
+ percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
}
static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
rb->task_refs = 0;
}
rb->task_refs++;
+ rb->ctx_refs++;
io_dismantle_req(req);
rb->reqs[rb->to_free++] = req;