 }

 static struct io_wq_work *io_get_next_work(struct io_wqe *wqe,
-                                           struct io_worker *worker,
-                                           bool *stalled)
+                                           struct io_worker *worker)
         __must_hold(wqe->lock)
 {
         struct io_wq_work_node *node, *prev;
@@ ... @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe,
         }

         if (stall_hash != -1U) {
+                /*
+                 * Set this before dropping the lock to avoid racing with new
+                 * work being added and clearing the stalled bit.
+                 */
+                wqe->flags |= IO_WQE_FLAG_STALLED;
                 raw_spin_unlock(&wqe->lock);
                 io_wait_on_hash(wqe, stall_hash);
                 raw_spin_lock(&wqe->lock);
-                *stalled = true;
         }

         return NULL;
@@ ... @@ static void io_worker_handle_work(struct io_worker *worker)
         do {
                 struct io_wq_work *work;
-                bool stalled;
 get_next:
                 /*
                  * If we got some work, mark us as busy. If we didn't, but
                  * the list isn't empty, it means we stalled on hashed work.
                  * Mark us stalled so we don't keep looking for work when we
                  * can't make progress, any work completion or insertion will
                  * clear the stalled flag.
                  */
-                stalled = false;
-                work = io_get_next_work(wqe, worker, &stalled);
+                work = io_get_next_work(wqe, worker);
                 if (work)
                         __io_worker_busy(wqe, worker, work);
-                else if (stalled)
-                        wqe->flags |= IO_WQE_FLAG_STALLED;
                 raw_spin_unlock(&wqe->lock);
                 if (!work)
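
The ordering the new comment describes is easier to see spelled out. What follows is a minimal userspace sketch of the race, not kernel code: a pthread mutex stands in for wqe->lock, a plain counter for the work list, WQE_FLAG_STALLED for IO_WQE_FLAG_STALLED, and a hypothetical enqueue_work() for the insertion path that, per the comment above, clears the stalled flag when work is queued. The interleaving that two racing threads would produce is written out by hand in main().

/* stall_race_sketch.c -- illustrative model only, not the io-wq implementation.
 * Build: cc -pthread stall_race_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define WQE_FLAG_STALLED (1u << 0)      /* stand-in for IO_WQE_FLAG_STALLED */

struct wqe {
        pthread_mutex_t lock;           /* stand-in for wqe->lock */
        unsigned int flags;
        int queued;                     /* stand-in for the work list */
};

/* Hypothetical enqueue path: insert work and clear the stalled bit under the lock. */
static void enqueue_work(struct wqe *wqe)
{
        pthread_mutex_lock(&wqe->lock);
        wqe->queued++;
        wqe->flags &= ~WQE_FLAG_STALLED;
        pthread_mutex_unlock(&wqe->lock);
}

int main(void)
{
        struct wqe wqe = { .lock = PTHREAD_MUTEX_INITIALIZER };

        /*
         * Old ordering: the worker drops the lock to wait on the hash and the
         * caller sets STALLED only afterwards. Work queued inside that window
         * clears a bit that is not set yet, so the late set wins.
         */
        pthread_mutex_lock(&wqe.lock);
        pthread_mutex_unlock(&wqe.lock);        /* drop lock, wait on the hash */
        enqueue_work(&wqe);                     /* new work lands in the window */
        pthread_mutex_lock(&wqe.lock);
        wqe.flags |= WQE_FLAG_STALLED;          /* the old "*stalled" handling in the caller */
        pthread_mutex_unlock(&wqe.lock);
        printf("old: queued=%d flags=%#x (runnable work, but queue marked stalled)\n",
               wqe.queued, wqe.flags);

        /*
         * New ordering: STALLED is set before the lock is dropped, so any
         * enqueue that happens afterwards observes and clears it.
         */
        wqe.flags = 0;
        wqe.queued = 0;
        pthread_mutex_lock(&wqe.lock);
        wqe.flags |= WQE_FLAG_STALLED;          /* set while still holding the lock */
        pthread_mutex_unlock(&wqe.lock);        /* drop lock, wait on the hash */
        enqueue_work(&wqe);                     /* clears the stalled bit */
        printf("new: queued=%d flags=%#x (work visible, queue not stalled)\n",
               wqe.queued, wqe.flags);
        return 0;
}

With the old ordering the sketch ends with queued work and the stalled bit set, the state that leaves the queue idle until something else clears the flag; setting the bit before dropping the lock closes that window, since any later insertion takes the lock and clears it.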