/*
 * NOTE(review): this span is a mangled unified-diff hunk, not valid C.
 * The leading '+'/'-' characters below are patch markers that leaked into
 * the file, and several context lines are missing (the body of the
 * REQ_F_CQE_SKIP branch, the tail of the "Selected buffer deallocation"
 * comment, and the locked-free-list handling around locked_free_nr).
 * Code text is kept byte-identical; only comments are added.  Recover the
 * original patch before attempting further edits -- TODO confirm upstream.
 */
static void __io_req_complete_post(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
/* '+' marker: patch adds a local so the rsrc-node put can be deferred
 * until after the CQ lock is dropped (see io_put_rsrc_node below). */
+ struct io_rsrc_node *rsrc_node = NULL;
io_cq_lock(ctx);
if (!(req->flags & REQ_F_CQE_SKIP))
/* NOTE(review): the branch body is missing here; the stray '}' below
 * closes a scope whose opening is not visible in this fragment. */
}
io_put_kbuf_comp(req);
io_dismantle_req(req);
/* '-'/'+' pair: replace the immediate io_req_put_rsrc(req) with capturing
 * the node, so the put happens after io_cq_unlock_post() releases the
 * completion lock. */
- io_req_put_rsrc(req);
+ rsrc_node = req->rsrc_node;
/* NOTE(review): the block comment below is truncated mid-sentence and is
 * never closed in this fragment. */
/*
 * Selected buffer deallocation in io_clean_op() assumes that
 * we don't hold ->completion_lock. Clean them here to avoid
ctx->locked_free_nr++;
}
io_cq_unlock_post(ctx);
+
/* '+' marker: the deferred reference drop, outside the CQ lock. */
+ io_put_rsrc_node(rsrc_node);
}
/*
 * NOTE(review): another mangled diff hunk.  Most of this function's
 * context lines are missing, and the final io_rsrc_node_ref_zero(node)
 * call references a 'node' that is not in scope here -- it appears to
 * belong to a different function whose lines were merged into this one
 * during extraction.  TODO: restore from the upstream patch.
 */
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
/* '-'/'+' pair: the patch renames io_req_put_rsrc(req) to
 * io_put_rsrc_node() taking the rsrc node directly. */
- io_req_put_rsrc(req);
+ io_put_rsrc_node(req->rsrc_node);
io_dismantle_req(req);
io_put_task_remote(req->task, 1);
/* NOTE(review): stray line -- 'node' is undeclared in this function. */
io_rsrc_node_ref_zero(node);
}
/*
 * NOTE(review): diff hunk renaming io_req_put_rsrc() to io_put_rsrc_node()
 * and changing its parameter from the request to the rsrc node itself.
 * The '+' lines form the complete post-patch helper:
 *
 *   static inline void io_put_rsrc_node(struct io_rsrc_node *node)
 *   {
 *           if (node)
 *                   io_rsrc_put_node(node, 1);
 *   }
 *
 * Drops one reference on @node; a NULL @node is tolerated as a no-op,
 * which lets callers pass an unconditionally-read req->rsrc_node.
 */
-static inline void io_req_put_rsrc(struct io_kiocb *req)
+static inline void io_put_rsrc_node(struct io_rsrc_node *node)
{
- if (req->rsrc_node)
- io_rsrc_put_node(req->rsrc_node, 1);
+ if (node)
+ io_rsrc_put_node(node, 1);
}
static inline void io_req_put_rsrc_locked(struct io_kiocb *req,