}
}
-static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
+static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned nr_wait, nr_tw, nr_tw_prev;
wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
}
-void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
+static void io_req_normal_work_add(struct io_kiocb *req)
{
struct io_uring_task *tctx = req->task->io_uring;
struct io_ring_ctx *ctx = req->ctx;
- if (!(flags & IOU_F_TWQ_FORCE_NORMAL) &&
- (ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
- rcu_read_lock();
- io_req_local_work_add(req, flags);
- rcu_read_unlock();
- return;
- }
-
/* task_work already pending, we're done */
if (!llist_add(&req->io_task_work.node, &tctx->task_list))
return;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);

	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
		return;

	io_fallback_tw(tctx);
}
+void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
+{
+ if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+ rcu_read_lock();
+ io_req_local_work_add(req, flags);
+ rcu_read_unlock();
+ } else {
+ io_req_normal_work_add(req);
+ }
+}
+
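
With the split, __io_req_task_work_add() becomes the one entry point that picks between the deferred (local) list and normal task_work. For reference, flag-less callers reach it through a thin inline wrapper; a minimal sketch along the lines of the io_uring/io_uring.h helper:

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	/* no flags: take whichever path the ring was set up for */
	__io_req_task_work_add(req, 0);
}
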
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
	struct llist_node *node;

	node = llist_del_all(&ctx->work_llist);
	while (node) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		node = node->next;
- __io_req_task_work_add(req, IOU_F_TWQ_FORCE_NORMAL);
+ io_req_normal_work_add(req);
}
}
#endif
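
Note that the deferred branch above is reachable only for rings created with IORING_SETUP_DEFER_TASKRUN. As a userspace illustration (not part of this patch), a liburing setup that opts into that path could look like:

#include <liburing.h>

int main(void)
{
	struct io_uring ring;

	/* DEFER_TASKRUN is only accepted together with SINGLE_ISSUER */
	if (io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
					  IORING_SETUP_DEFER_TASKRUN))
		return 1;

	io_uring_queue_exit(&ring);
	return 0;
}
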
enum {
- /* don't use deferred task_work */
- IOU_F_TWQ_FORCE_NORMAL = 1,
-
	/*
	 * A hint to not wake right away but delay until there are enough
	 * tw's queued to match the number of CQEs the task is waiting for.
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
- IOU_F_TWQ_LAZY_WAKE = 2,
+ IOU_F_TWQ_LAZY_WAKE = 1,
};
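
To illustrate the comment above (call sites are hypothetical, not taken from this patch): a completion known to post exactly one CQE may pass the hint, while anything that can post more must not:

/* single-CQE completion: batch wakeups until the waiter's CQE target is met */
__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);

/* multishot request (may generate several CQEs): wake immediately */
__io_req_task_work_add(req, 0);
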
enum {