 	struct io_wqe *wqe;
 
 	struct io_wq_work *cur_work;
+	struct io_wq_work *next_work;
 	raw_spinlock_t lock;
 
 	struct completion ref_done;
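The struct change above adds a second slot, next_work, beside cur_work; both pointers are serialized by the worker's lock. To make the locking discipline concrete, here is a minimal user-space model of just these fields. All of the types are invented for illustration, with pthread_mutex_t standing in for the kernel's raw_spinlock_t:

/* Toy user-space model of the two work slots; pthread_mutex_t stands
 * in for the kernel's raw_spinlock_t, and both structs are invented
 * for illustration. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct work_item {
	int flags;			/* stand-in for IO_WQ_WORK_* bits */
	void *tag;			/* opaque key used for matching */
};

struct worker_model {
	struct work_item *cur_work;	/* the active work item */
	struct work_item *next_work;	/* dequeued but not yet active */
	pthread_mutex_t lock;		/* guards both pointers above */
};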
 	raw_spin_lock(&worker->lock);
 	worker->cur_work = work;
+	worker->next_work = NULL;
 	raw_spin_unlock(&worker->lock);
 }
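The hunk above is the promotion step (io_assign_current_work() in fs/io-wq.c): installing the work as cur_work and clearing next_work happen inside a single critical section. Continuing the toy model, a sketch of the same step:

/* Sketch of the promotion step: one critical section moves the item
 * from next_work to cur_work, so a canceler holding the lock never
 * sees a dequeued item in both slots, and never in neither. */
static void model_assign_current(struct worker_model *worker,
				 struct work_item *work)
{
	pthread_mutex_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	pthread_mutex_unlock(&worker->lock);
}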
 		 * clear the stalled flag.
 		 */
 		work = io_get_next_work(acct, worker);
-		if (work)
+		if (work) {
 			__io_worker_busy(wqe, worker);
+			/*
+			 * Make sure cancelation can find this, even before
+			 * it becomes the active work. That avoids a window
+			 * where the work has been removed from our general
+			 * work list, but isn't yet discoverable as the
+			 * current work item for this worker.
+			 */
+			raw_spin_lock(&worker->lock);
+			worker->next_work = work;
+			raw_spin_unlock(&worker->lock);
+		}
 		raw_spin_unlock(&wqe->lock);
 		if (!work)
 			break;
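This hunk in io_worker_handle_work() closes the race the patch is about. Before it, a work item taken off the wqe pending list was invisible to cancelation until it became cur_work, so a cancel request arriving in that window could find the work nowhere and report it as not found. Publishing the item as next_work while wqe->lock is still held removes that window. A user-space sketch of the dequeue side, continuing the toy model above; queue_model and its one-element pending "list" are invented for brevity:

struct queue_model {
	pthread_mutex_t lock;		/* stand-in for wqe->lock */
	struct work_item *pending;	/* toy one-element pending list */
};

/* Dequeue path: the item is published as next_work before the queue
 * lock is dropped, so at every instant it is findable either on the
 * pending list or in one of the worker's two slots. */
static struct work_item *model_get_work(struct worker_model *worker,
					struct queue_model *q)
{
	struct work_item *work;

	pthread_mutex_lock(&q->lock);
	work = q->pending;
	q->pending = NULL;
	if (work) {
		pthread_mutex_lock(&worker->lock);
		worker->next_work = work;
		pthread_mutex_unlock(&worker->lock);
	}
	pthread_mutex_unlock(&q->lock);
	return work;
}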
 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }
+static bool __io_wq_worker_cancel(struct io_worker *worker,
+				  struct io_cb_cancel_data *match,
+				  struct io_wq_work *work)
+{
+	if (work && match->fn(work, match->data)) {
+		work->flags |= IO_WQ_WORK_CANCEL;
+		set_notify_signal(worker->task);
+		return true;
+	}
+
+	return false;
+}
+
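The new helper factors out the match-and-notify step so the caller can apply it to either slot. Note one small addition visible in the diff: on a match it now also sets IO_WQ_WORK_CANCEL on the work, where the old inline code only signaled the worker task. In the toy model, a match callback of the kind carried by io_cb_cancel_data might look like this; match_by_tag is hypothetical:

/* Hypothetical match callback: cancel requests carry an opaque tag,
 * and a work item matches when its tag compares equal. */
static bool match_by_tag(struct work_item *work, void *data)
{
	return work->tag == data;
}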
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
 	struct io_cb_cancel_data *match = data;
 	 * may dereference the passed in work.
 	 */
 	raw_spin_lock(&worker->lock);
-	if (worker->cur_work &&
-	    match->fn(worker->cur_work, match->data)) {
-		set_notify_signal(worker->task);
+	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
+	    __io_wq_worker_cancel(worker, match, worker->next_work))
 		match->nr_running++;
-	}
 	raw_spin_unlock(&worker->lock);
 	return match->nr_running && !match->cancel_all;
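The rewritten callback checks both slots under the worker lock, and the short-circuiting || bumps nr_running at most once per worker. Putting the model together, a sketch of the canceler side; the flag value and the comment marking where the kernel would call set_notify_signal() are illustrative:

#define MODEL_WORK_CANCEL	0x1	/* stand-in for IO_WQ_WORK_CANCEL */

/* Canceler side: because publish (model_get_work) and promote
 * (model_assign_current) both run under worker->lock, any dequeued
 * item is visible in exactly one of the two slots checked here. */
static bool model_cancel(struct worker_model *worker, void *tag)
{
	struct work_item *slots[2];
	bool hit = false;

	pthread_mutex_lock(&worker->lock);
	slots[0] = worker->cur_work;
	slots[1] = worker->next_work;
	for (int i = 0; i < 2 && !hit; i++) {
		if (slots[i] && match_by_tag(slots[i], tag)) {
			slots[i]->flags |= MODEL_WORK_CANCEL;
			/* the kernel also calls set_notify_signal() here */
			hit = true;
		}
	}
	pthread_mutex_unlock(&worker->lock);
	return hit;
}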