// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};
/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;
	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;
	struct completion ref_done;
	unsigned long create_state;
	struct callback_head create_work;
	int create_index;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};
/*
 * Per-node worker thread pool
 */
struct io_wqe {
	raw_spinlock_t lock;
	struct io_wqe_acct acct[IO_WQ_ACCT_NR];
	int node;
	struct hlist_nulls_head free_list;
	struct list_head all_list;
	struct wait_queue_entry wait;
	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
	cpumask_var_t cpu_mask;
};
struct io_wq {
	unsigned long state;
	free_work_fn *free_work;
	io_wq_work_fn *do_work;
	struct io_wq_hash *hash;
	atomic_t worker_refs;
	struct completion worker_done;
	struct hlist_node cpuhp_node;
	struct task_struct *task;
	struct io_wqe *wqes[];
};
static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
static void io_wqe_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}
static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
{
	return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
}
static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&worker->wqe->lock);
	acct->nr_workers--;
	raw_spin_unlock(&worker->wqe->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
				io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wqe->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wqe->lock);
	io_wqe_dec_running(worker);
	worker->flags = 0;
	current->flags &= ~PF_IO_WORKER;

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wqe->wq);
	do_exit(0);
}
static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
{
	bool ret = false;

	raw_spin_lock(&acct->lock);
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		ret = true;
	raw_spin_unlock(&acct->lock);

	return ret;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
					struct io_wqe_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wqe_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wqe->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wqe->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	return create_io_worker(wqe->wq, wqe, acct->index);
}
static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;
	struct io_wqe *wqe;
	struct io_wqe_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wqe = worker->wqe;
	wq = wqe->wq;
	acct = &wqe->acct[worker->create_index];
	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wqe->lock);
	if (do_create) {
		create_io_worker(wq, wqe, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wqe_acct *acct,
				   task_work_func_t func)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}
static void io_wqe_dec_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wqe->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wqe->lock);
	}
}
/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wqe *wqe = worker->wqe;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wqe->wq->hash->wait))
				wake_up(&wqe->wq->hash->wait);
		}
	}

	return NULL;
}
static bool io_flush_signals(void)
{
	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
		__set_current_state(TASK_RUNNING);
		tracehook_notify_signal();
		return true;
	}
	return false;
}
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_flush_signals();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
static void io_worker_handle_work(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		raw_spin_lock(&acct->lock);
		work = io_get_next_work(acct, worker);
		raw_spin_unlock(&acct->lock);
		if (work) {
			__io_worker_busy(wqe, worker);

			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		} else {
			break;
		}
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);
	} while (1);
}
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	audit_alloc_kernel(current);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
		while (io_acct_run_queue(acct))
			io_worker_handle_work(worker);

		raw_spin_lock(&wqe->lock);
		/* timed out, exit unless we're the last worker */
		if (last_timeout && acct->nr_workers > 1) {
			acct->nr_workers--;
			raw_spin_unlock(&wqe->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wqe, worker);
		raw_spin_unlock(&wqe->lock);
		if (io_flush_signals())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		last_timeout = !ret;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		io_worker_handle_work(worker);

	audit_free(current);
	io_worker_exit(worker);
	return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wqe_dec_running(worker);
}
static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wqe->lock);
	wake_up_new_task(tsk);
}
static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}
static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wqe *wqe;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wqe = worker->wqe;
	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wqe_acct *acct = io_wqe_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wqe->lock);
			while (io_acct_cancel_pending_work(wqe, acct, &match))
				;
		} else {
			raw_spin_unlock(&wqe->lock);
		}
		io_worker_ref_put(wqe->wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}
static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wqe->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wqe = wqe;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}
static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wqe);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wqe_insert_work(wqe, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	raw_spin_lock(&wqe->lock);
	rcu_read_lock();
	do_create = !io_wqe_activate_free_worker(wqe, acct);
	rcu_read_unlock();
	raw_spin_unlock(&wqe->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wqe_create_worker(wqe, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wqe->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wqe->lock);
			return;
		}
		raw_spin_unlock(&wqe->lock);

		/* fatal condition, failed to create the first worker */
		match.fn		= io_wq_work_match_item,
		match.data		= work,
		match.cancel_all	= false,

		io_acct_cancel_pending_work(wqe, acct, &match);
	}
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}
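/*
 * Illustrative sketch, not part of io-wq.c: how a user of this API could wire
 * up the two per-wq callbacks and queue a work item embedded in a request.
 * The names example_do_work(), example_free_work() and "req" are hypothetical
 * stand-ins; io_uring's real callbacks live in the io_uring core.
 *
 *	static void example_do_work(struct io_wq_work *work)
 *	{
 *		// execute the request that embeds @work
 *	}
 *
 *	static struct io_wq_work *example_free_work(struct io_wq_work *work)
 *	{
 *		// drop the request; return a linked work item or NULL
 *		return NULL;
 *	}
 *
 *	// with a struct io_wq *wq created against those callbacks:
 *	//	io_wq_enqueue(wq, &req->work);
 */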
/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
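/*
 * Illustrative sketch, not part of io-wq.c: serializing buffered writes to the
 * same file by hashing the work on the inode before queueing it. The request
 * layout and the REQ_F_ISREG flag are hypothetical stand-ins for the caller's
 * own bookkeeping.
 *
 *	struct io_wq_work *work = &req->work;
 *
 *	if (req->flags & REQ_F_ISREG)
 *		io_wq_hash_work(work, file_inode(req->file));
 *	io_wq_enqueue(wq, work);
 *
 * Items hashed to the same bucket execute one at a time in queue order, while
 * unhashed items still run concurrently.
 */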
static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		set_notify_signal(worker->task);
		return true;
	}

	return false;
}
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}
static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);

		if (io_acct_cancel_pending_work(wqe, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}
static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the wqe->lock, to ensure that
	 * we'll find a work item regardless of state.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;

		raw_spin_lock(&wqe->lock);
		io_wqe_cancel_running_work(wqe, &match);
		raw_spin_unlock(&wqe->lock);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
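/*
 * Illustrative sketch, not part of io-wq.c: a work_cancel_fn that matches one
 * specific request by pointer, in the same spirit as io_wq_work_match_item
 * above. The example_* names are hypothetical.
 *
 *	static bool example_cancel_one(struct io_wq_work *work, void *data)
 *	{
 *		return work == data;
 *	}
 *
 *	enum io_wq_cancel ret;
 *
 *	ret = io_wq_cancel_cb(wq, example_cancel_one, &req->work, false);
 *	// IO_WQ_CANCEL_OK: removed while still pending, no completion posted
 *	// IO_WQ_CANCEL_RUNNING: cancellation signalled, completion still runs
 *	// IO_WQ_CANCEL_NOTFOUND: no matching work item was found
 */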
static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = &wqe->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wqe_activate_free_worker(wqe, acct);
	}
	rcu_read_unlock();
	return 1;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, node, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
			goto err;
		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wait.func = io_wqe_hash_wake;
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			struct io_wqe_acct *acct = &wqe->acct[i];

			acct->index = i;
			atomic_set(&acct->nr_running, 0);
			INIT_WQ_LIST(&acct->work_list);
			raw_spin_lock_init(&acct->lock);
		}
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node) {
		if (!wq->wqes[node])
			continue;
		free_cpumask_var(wq->wqes[node]->cpu_mask);
		kfree(wq->wqes[node]);
	}
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
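/*
 * Illustrative sketch, not part of io-wq.c: how a caller might create a worker
 * pool. A caller typically keeps one io_wq per submitting task; the hash
 * object and the example_* callback names are hypothetical, and error handling
 * is abbreviated.
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,		// refcounted struct io_wq_hash
 *		.task		= current,
 *		.free_work	= example_free_work,
 *		.do_work	= example_do_work,
 *	};
 *	struct io_wq *wq;
 *
 *	wq = io_wq_create(concurrency, &data);	// bounded-worker limit
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */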
static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wqe->wq == data;
}
void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}
static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
	}
}
static void io_wq_exit_workers(struct io_wq *wq)
{
	int node;

	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
	}
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	for_each_node(node) {
		spin_lock_irq(&wq->hash->wait.lock);
		list_del_init(&wq->wqes[node]->wait.entry);
		spin_unlock_irq(&wq->hash->wait.lock);
	}
	put_task_struct(wq->task);
	wq->task = NULL;
}
static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_cb_cancel_data match = {
			.fn		= io_wq_work_match_all,
			.cancel_all	= true,
		};
		io_wqe_cancel_pending_work(wqe, &match);
		free_cpumask_var(wqe->cpu_mask);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq);
}
void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}
struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
	return true;
}
static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}
static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	int i;

	rcu_read_lock();
	for_each_node(i) {
		struct io_wqe *wqe = wq->wqes[i];

		if (mask)
			cpumask_copy(wqe->cpu_mask, mask);
		else
			cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
	}
	rcu_read_unlock();
	return 0;
}
/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	int prev[IO_WQ_ACCT_NR];
	bool first_node = true;
	int i, node;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_wqe_acct *acct;

		raw_spin_lock(&wqe->lock);
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			acct = &wqe->acct[i];
			if (first_node)
				prev[i] = max_t(int, acct->max_workers, prev[i]);
			if (new_count[i])
				acct->max_workers = new_count[i];
		}
		raw_spin_unlock(&wqe->lock);
		first_node = false;
	}
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}
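/*
 * Illustrative sketch, not part of io-wq.c: adjusting the per-type worker
 * limits the way the IORING_REGISTER_IOWQ_MAX_WORKERS registration opcode
 * does. A zero entry leaves that limit unchanged, and the previous values are
 * returned in the same array.
 *
 *	int counts[IO_WQ_ACCT_NR] = { 16, 0 };	// bounded = 16, unbounded kept
 *
 *	io_wq_max_workers(wq, counts);
 *	// counts[] now holds the previous bounded/unbounded maximums
 */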
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;

	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);