static void io_eventfd_signal(struct io_ring_ctx *ctx)
{
struct io_ev_fd *ev_fd;
+ bool skip;
+
+ spin_lock(&ctx->completion_lock);
+ /*
+ * Eventfd should only get triggered when at least one event has been
+ * posted. Some applications rely on the eventfd notification count only
+ * changing IFF a new CQE has been added to the CQ ring. There's no
+ * dependency on a 1:1 relationship between how many times this function is
+ * called (and hence the eventfd count) and the number of CQEs posted to the
+ * CQ ring.
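+ * To honour that, remember the CQ tail we last signalled for and skip
+ * the eventfd signal if the tail has not moved since.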
+ */
+ skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
+ ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+ spin_unlock(&ctx->completion_lock);
+ if (skip)
+ return;
rcu_read_lock();
/*
io_eventfd_signal(ctx);
}
-/*
- * This should only get called when at least one event has been posted.
- * Some applications rely on the eventfd notification count only changing
- * IFF a new CQE has been added to the CQ ring. There's no depedency on
- * 1:1 relationship between how many times this function is called (and
- * hence the eventfd count) and number of CQEs posted to the CQ ring.
- */
void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
- bool all_flushed, posted;
+ bool all_flushed;
size_t cqe_size = sizeof(struct io_uring_cqe);
if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
if (ctx->flags & IORING_SETUP_CQE32)
cqe_size <<= 1;
- posted = false;
spin_lock(&ctx->completion_lock);
while (!list_empty(&ctx->cq_overflow_list)) {
struct io_uring_cqe *cqe = io_get_cqe(ctx);
else
io_account_cq_overflow(ctx);
- posted = true;
list_del(&ocqe->list);
kfree(ocqe);
}
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
- if (posted)
- io_cqring_ev_posted(ctx);
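+ /* ev_posted() itself skips the eventfd when no new CQEs were posted */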
+ io_cqring_ev_posted(ctx);
return all_flushed;
}
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
- if (filled)
- io_cqring_ev_posted(ctx);
+ io_cqring_ev_posted(ctx);
return filled;
}
static void __io_req_find_next_prep(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- bool posted;
spin_lock(&ctx->completion_lock);
- posted = io_disarm_next(req);
+ io_disarm_next(req);
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
- if (posted)
- io_cqring_ev_posted(ctx);
+ io_cqring_ev_posted(ctx);
}
static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
kfree(ev_fd);
return ret;
}
+
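+ /*
+ * Start tail tracking from the current CQ tail so that only CQEs
+ * posted after the eventfd is registered can trigger a signal.
+ */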
+ spin_lock(&ctx->completion_lock);
+ ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+ spin_unlock(&ctx->completion_lock);
+
ev_fd->eventfd_async = eventfd_async;
ctx->has_evfd = true;
rcu_assign_pointer(ctx->io_ev_fd, ev_fd);