{
	struct io_rings *r = ctx->rings;

-	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
+	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}
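For context, and not part of the patch itself: both the old and the new expression rely on free-running unsigned counters. The SQ tail (advanced by the producer) and the cached SQ head only ever increase, so their difference is the number of queued-but-unconsumed entries even after either counter wraps, and the ring is full exactly when that difference equals the ring size. A minimal self-contained sketch of that arithmetic, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

struct ring_state {
	uint32_t tail;		/* free-running producer counter, never masked */
	uint32_t head;		/* free-running consumer counter, never masked */
	uint32_t entries;	/* ring size, a power of two */
};

/* Entries currently queued; unsigned subtraction stays correct across wraparound. */
static inline uint32_t ring_pending(const struct ring_state *r)
{
	return r->tail - r->head;
}

static inline bool ring_full(const struct ring_state *r)
{
	return ring_pending(r) == r->entries;
}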
static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
-	if (__io_cqring_events(ctx) == rings->cq_ring_entries)
+	if (__io_cqring_events(ctx) == ctx->cq_entries)
		return NULL;

	tail = ctx->cached_cq_tail++;
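The comment refers to the lockless publish pattern on the CQ side: the producer first reads the consumer's head (inside __io_cqring_events), branches on whether there is room, and only then fills the cqe with WRITE_ONCE stores. Because those stores are control-dependent on the load of head, they cannot be reordered before the space check, so no explicit barrier is needed at this point. A simplified userspace-style sketch of the pattern, with hypothetical types and helper names and volatile stand-ins for READ_ONCE/WRITE_ONCE:

#include <stdint.h>

#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

struct cqe {
	uint64_t user_data;
	int32_t  res;
	uint32_t flags;
};

struct cq_ring {
	uint32_t head;		/* advanced by the consumer */
	uint32_t mask;		/* entries - 1 */
	uint32_t entries;
	struct cqe cqes[];
};

/* Producer path: load head, branch on it, then store with WRITE_ONCE.
 * The branch makes the stores control-dependent on the load of head,
 * which is what orders them on the producer side. */
static int post_cqe(struct cq_ring *r, uint32_t *cached_tail,
		    uint64_t user_data, int32_t res)
{
	uint32_t tail = *cached_tail;
	struct cqe *cqe;

	if (tail - READ_ONCE(r->head) == r->entries)
		return -1;		/* ring full, caller handles overflow */

	cqe = &r->cqes[tail & r->mask];
	WRITE_ONCE(cqe->user_data, user_data);
	WRITE_ONCE(cqe->res, res);
	WRITE_ONCE(cqe->flags, 0);

	*cached_tail = tail + 1;
	/* Making the new tail visible to the consumer would use a release
	 * store (smp_store_release in the kernel); omitted in this sketch. */
	return 0;
}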
/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
-	struct io_rings *rings = ctx->rings;
	unsigned long flags;
	bool all_flushed, posted;

-	if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
+	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
		return false;

	posted = false;
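A note on the contract stated above the function: as far as this hunk shows, the early return reports false (entries are still backlogged) when the CQ ring is already completely full and the caller did not force the flush, since there is no room to post anything into it. The sketch below models only that visible contract with hypothetical types; the real function walks the overflow list under the completion lock and does considerably more.

#include <stdbool.h>

struct backlog { unsigned int nr; };
struct bounded_ring { unsigned int used, entries; };

/* Flush backlogged items into a bounded ring; return true only when no
 * backlogged entries remain afterwards. "force" merely skips the early
 * bail-out here, mirroring the !force check in the hunk above. */
static bool flush_backlog(struct bounded_ring *ring, struct backlog *bl, bool force)
{
	/* Nothing can be flushed into a completely full ring. */
	if (!force && ring->used == ring->entries)
		return false;

	while (bl->nr && ring->used < ring->entries) {
		ring->used++;		/* post one backlogged entry */
		bl->nr--;
	}
	return bl->nr == 0;		/* true: backlog fully drained */
}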