};

static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx,
-				  unsigned long start_jiffies)
+				  unsigned long start_jiffies, bool cap_entries)
{
	unsigned long timeout = start_jiffies + ctx->sq_thread_idle;
	struct io_sq_data *sqd = ctx->sq_data;
@@ ... @@
		finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
		io_ring_clear_wakeup_flag(ctx);
	}

+	/* if we're handling multiple rings, cap submit size for fairness */
+	if (cap_entries && to_submit > 8)
+		to_submit = 8;
+
	mutex_lock(&ctx->uring_lock);
	if (likely(!percpu_ref_is_dying(&ctx->refs)))
		ret = io_submit_sqes(ctx, to_submit);
@@ ... @@ static int io_sq_thread(void *data)
	start_jiffies = jiffies;
	while (!kthread_should_stop()) {
		enum sq_ret ret = 0;
+		bool cap_entries;

		/*
		 * Any changes to the sqd lists are synchronized through the
		 * kthread itself, so we don't need a lock around this.
		 */
		if (unlikely(!list_empty(&sqd->ctx_new_list)))
			io_sqd_init_new(sqd);

+		cap_entries = !list_is_singular(&sqd->ctx_list);
+
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			if (current->cred != ctx->creds) {
				if (old_cred)
					revert_creds(old_cred);
				old_cred = override_creds(ctx->creds);
			}

-			ret |= __io_sq_thread(ctx, start_jiffies);
+			ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);

			io_sq_thread_drop_mm();
		}
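
The multi-ring case this patch guards against arises when a second ring attaches to an existing SQPOLL ring's thread with IORING_SETUP_ATTACH_WQ, which places both ctxs on the same sqd->ctx_list. Below is a minimal userspace sketch of that setup, not part of the patch, assuming liburing and a kernel with shared-SQPOLL support; the ring sizes and idle timeout are arbitrary:

/*
 * Hypothetical demo: two SQPOLL rings sharing one kernel SQ thread via
 * IORING_SETUP_ATTACH_WQ. With both ctxs on the shared sqd->ctx_list,
 * cap_entries above evaluates true and each ring's submit batch is
 * capped at 8 SQEs per pass. Build with -luring; note that SQPOLL may
 * require elevated privileges on older kernels.
 */
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct io_uring_params pa = {
		.flags = IORING_SETUP_SQPOLL,
		.sq_thread_idle = 2000,	/* ms before the SQ thread idles */
	};
	struct io_uring a, b;
	int ret;

	ret = io_uring_queue_init_params(64, &a, &pa);
	if (ret < 0) {
		fprintf(stderr, "ring a: %s\n", strerror(-ret));
		return 1;
	}

	/* attach ring b to ring a's SQ thread instead of spawning another */
	struct io_uring_params pb = {
		.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ,
		.wq_fd = a.ring_fd,
	};
	ret = io_uring_queue_init_params(64, &b, &pb);
	if (ret < 0) {
		fprintf(stderr, "ring b: %s\n", strerror(-ret));
		io_uring_queue_exit(&a);
		return 1;
	}

	/* ... queue and submit SQEs on both rings here ... */

	io_uring_queue_exit(&b);
	io_uring_queue_exit(&a);
	return 0;
}

With both rings serviced by a single kthread, a ring that keeps its SQ full can no longer monopolize the thread: each pass of io_sq_thread() now submits at most 8 entries per ring before moving on to the next one.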