return io_wq_current_is_worker();
}
+static inline unsigned __io_cqring_events(struct io_ring_ctx *ctx)
+{
+ return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
+}
+
static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
if (waitqueue_active(&ctx->wait))
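
The new __io_cqring_events() helper centralizes the CQ ring occupancy
calculation: ctx->cached_cq_tail is the kernel's count of posted
completions, cq.head is advanced by userspace as it consumes them
(hence the READ_ONCE()), and the difference is the number of events
still pending. Both indices are free-running 32-bit counters, so plain
unsigned subtraction stays correct even after they wrap. A minimal
userspace sketch of that arithmetic, with invented toy_* names rather
than io_uring's own:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the CQ ring indices, not io_uring's types. */
struct toy_ring {
	uint32_t head;		/* consumer position, bumped by "userspace" */
	uint32_t tail;		/* producer position, bumped by the "kernel" */
	uint32_t entries;	/* ring capacity */
};

/* Same arithmetic as __io_cqring_events(): free-running counters make
 * unsigned subtraction correct across u32 wraparound. */
static uint32_t toy_events(const struct toy_ring *r)
{
	return r->tail - r->head;
}

int main(void)
{
	/* head just below the 32-bit wrap point, tail already wrapped */
	struct toy_ring r = { .head = UINT32_MAX - 1, .tail = 2, .entries = 8 };

	printf("pending: %" PRIu32 "\n", toy_events(&r));	/* prints 4 */
	printf("full: %d\n", toy_events(&r) == r.entries);	/* prints 0 */
	return 0;
}

The first caller is the overflow-flush path, where the nested
early-exit check collapses into a single condition: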
unsigned long flags;
LIST_HEAD(list);
- if (!force) {
- if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
- rings->cq_ring_entries))
- return false;
- }
+ if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
+ return false;
spin_lock_irqsave(&ctx->completion_lock, flags);
-
cqe = NULL;
list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
if (!io_match_task(req, tsk, files))
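
Unless the flush is forced, a completely full CQ ring
(__io_cqring_events(ctx) == rings->cq_ring_entries) leaves no room to
move overflowed completions back into it, so io_cqring_overflow_flush()
still bails out with false; the nested if (!force) block simply becomes
one condition, and a stray blank line after spin_lock_irqsave() goes
with it. The flush then walks ctx->cq_overflow_list with
list_for_each_entry_safe(), the iterator variant that caches the next
node so the current entry can be unlinked mid-walk. A kernel-style
sketch of that idiom (struct pending and flush_matching() are invented
names, and this only builds in a kernel tree):

#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical node type; io_uring's real entries are struct io_kiocb. */
struct pending {
	struct list_head list;
	bool matches;
};

/* Move matching entries from @src onto @dst. The _safe variant keeps a
 * pointer to the next node, so unlinking the current one is fine. */
static void flush_matching(struct list_head *src, struct list_head *dst)
{
	struct pending *p, *tmp;

	list_for_each_entry_safe(p, tmp, src, list) {
		if (!p->matches)
			continue;
		list_move(&p->list, dst);
	}
}

The last open-coded copy of the subtraction sits in io_cqring_events(),
which can also drop the local rings pointer it kept around solely for
that final line: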
static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
- struct io_rings *rings = ctx->rings;
-
if (test_bit(0, &ctx->cq_check_overflow)) {
/*
* noflush == true is from the waitqueue handler, just ensure
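
This hunk is pure cleanup: the rings local was only needed for the
subtraction at the bottom of the function, and the helper now reaches
the ring through ctx itself. That subtraction sits just after an
smp_rmb() whose pairing is described by the comment at the top of
io_uring.c; the refactor leaves the barrier alone and only moves the
arithmetic. For reference, the generic publish/consume pairing from
Documentation/memory-barriers.txt looks like the sketch below; this
illustrates the pattern, not io_uring's exact scheme, and all names
are invented:

#include <linux/compiler.h>
#include <asm/barrier.h>

/* Illustrative shared state, not io_uring's layout. */
static int payload;
static unsigned int published;

/* Producer: make the payload visible before publishing the index. */
static void produce(int value, unsigned int idx)
{
	payload = value;
	smp_wmb();		/* order the payload write before the index */
	WRITE_ONCE(published, idx);
}

/* Consumer: read the index first, then the data it covers. */
static int consume(unsigned int *idx)
{
	*idx = READ_ONCE(published);
	smp_rmb();		/* order the index read before the payload */
	return payload;
}

With the barrier untouched, the final hunk just swaps the helper in
for the return value: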
/* See comment at the top of this file */
smp_rmb();
- return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
+ return __io_cqring_events(ctx);
}
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)