[platform/kernel/linux-rpi.git] / fs / io-wq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Basic worker thread pool for io_uring
4  *
5  * Copyright (C) 2019 Jens Axboe
6  *
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/errno.h>
11 #include <linux/sched/signal.h>
12 #include <linux/mm.h>
13 #include <linux/sched/mm.h>
14 #include <linux/percpu.h>
15 #include <linux/slab.h>
16 #include <linux/kthread.h>
17 #include <linux/rculist_nulls.h>
18 #include <linux/fs_struct.h>
19 #include <linux/task_work.h>
20 #include <linux/blk-cgroup.h>
21 #include <linux/audit.h>
22 #include <linux/cpu.h>
23
24 #include "../kernel/sched/sched.h"
25 #include "io-wq.h"
26
27 #define WORKER_IDLE_TIMEOUT     (5 * HZ)
28
29 enum {
30         IO_WORKER_F_UP          = 1,    /* up and active */
31         IO_WORKER_F_RUNNING     = 2,    /* account as running */
32         IO_WORKER_F_FREE        = 4,    /* worker on free list */
33         IO_WORKER_F_FIXED       = 8,    /* static idle worker */
34         IO_WORKER_F_BOUND       = 16,   /* is doing bounded work */
35 };
36
37 enum {
38         IO_WQ_BIT_EXIT          = 0,    /* wq exiting */
39         IO_WQ_BIT_CANCEL        = 1,    /* cancel work on list */
40         IO_WQ_BIT_ERROR         = 2,    /* error on setup */
41 };
42
43 enum {
44         IO_WQE_FLAG_STALLED     = 1,    /* stalled on hash */
45 };
46
47 /*
48  * One for each thread in a wqe pool
49  */
50 struct io_worker {
51         refcount_t ref;
52         unsigned flags;
53         struct hlist_nulls_node nulls_node;
54         struct list_head all_list;
55         struct task_struct *task;
56         struct io_wqe *wqe;
57
58         struct io_wq_work *cur_work;
59         spinlock_t lock;
60
61         struct rcu_head rcu;
62         struct mm_struct *mm;
63 #ifdef CONFIG_BLK_CGROUP
64         struct cgroup_subsys_state *blkcg_css;
65 #endif
66         const struct cred *cur_creds;
67         const struct cred *saved_creds;
68         struct files_struct *restore_files;
69         struct nsproxy *restore_nsproxy;
70         struct fs_struct *restore_fs;
71 };
72
73 #if BITS_PER_LONG == 64
74 #define IO_WQ_HASH_ORDER        6
75 #else
76 #define IO_WQ_HASH_ORDER        5
77 #endif
78
79 #define IO_WQ_NR_HASH_BUCKETS   (1u << IO_WQ_HASH_ORDER)
80
81 struct io_wqe_acct {
82         unsigned nr_workers;
83         unsigned max_workers;
84         atomic_t nr_running;
85 };
86
87 enum {
88         IO_WQ_ACCT_BOUND,
89         IO_WQ_ACCT_UNBOUND,
90 };
91
92 /*
93  * Per-node worker thread pool
94  */
95 struct io_wqe {
96         struct {
97                 raw_spinlock_t lock;
98                 struct io_wq_work_list work_list;
99                 unsigned long hash_map;
100                 unsigned flags;
101         } ____cacheline_aligned_in_smp;
102
103         int node;
104         struct io_wqe_acct acct[2];
105
106         struct hlist_nulls_head free_list;
107         struct list_head all_list;
108
109         struct io_wq *wq;
110         struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
111 };
112
113 /*
114  * Per io_wq state
115  */
116 struct io_wq {
117         struct io_wqe **wqes;
118         unsigned long state;
119
120         free_work_fn *free_work;
121         io_wq_work_fn *do_work;
122
123         struct task_struct *manager;
124         struct user_struct *user;
125         refcount_t refs;
126         struct completion done;
127
128         struct hlist_node cpuhp_node;
129
130         refcount_t use_refs;
131 };
132
133 static enum cpuhp_state io_wq_online;
134
135 static bool io_worker_get(struct io_worker *worker)
136 {
137         return refcount_inc_not_zero(&worker->ref);
138 }
139
140 static void io_worker_release(struct io_worker *worker)
141 {
142         if (refcount_dec_and_test(&worker->ref))
143                 wake_up_process(worker->task);
144 }
145
146 /*
147  * Note: drops the wqe->lock if returning true! The caller must re-acquire
148  * the lock in that case. Some callers need to restart handling if this
149  * happens, so we can't just re-acquire the lock on behalf of the caller.
150  */
151 static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
152 {
153         bool dropped_lock = false;
154
155         if (worker->saved_creds) {
156                 revert_creds(worker->saved_creds);
157                 worker->cur_creds = worker->saved_creds = NULL;
158         }
159
160         if (current->files != worker->restore_files) {
161                 __acquire(&wqe->lock);
162                 raw_spin_unlock_irq(&wqe->lock);
163                 dropped_lock = true;
164
165                 task_lock(current);
166                 current->files = worker->restore_files;
167                 current->nsproxy = worker->restore_nsproxy;
168                 task_unlock(current);
169         }
170
171         if (current->fs != worker->restore_fs)
172                 current->fs = worker->restore_fs;
173
174         /*
175          * If we have an active mm, we need to drop the wq lock before unusing
176          * it. If we do, return true and let the caller retry the idle loop.
177          */
178         if (worker->mm) {
179                 if (!dropped_lock) {
180                         __acquire(&wqe->lock);
181                         raw_spin_unlock_irq(&wqe->lock);
182                         dropped_lock = true;
183                 }
184                 __set_current_state(TASK_RUNNING);
185                 kthread_unuse_mm(worker->mm);
186                 mmput(worker->mm);
187                 worker->mm = NULL;
188         }
189
190 #ifdef CONFIG_BLK_CGROUP
191         if (worker->blkcg_css) {
192                 kthread_associate_blkcg(NULL);
193                 worker->blkcg_css = NULL;
194         }
195 #endif
196         if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
197                 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
198         return dropped_lock;
199 }
200
201 static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
202                                                    struct io_wq_work *work)
203 {
204         if (work->flags & IO_WQ_WORK_UNBOUND)
205                 return &wqe->acct[IO_WQ_ACCT_UNBOUND];
206
207         return &wqe->acct[IO_WQ_ACCT_BOUND];
208 }
209
210 static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
211                                                   struct io_worker *worker)
212 {
213         if (worker->flags & IO_WORKER_F_BOUND)
214                 return &wqe->acct[IO_WQ_ACCT_BOUND];
215
216         return &wqe->acct[IO_WQ_ACCT_UNBOUND];
217 }
218
219 static void io_worker_exit(struct io_worker *worker)
220 {
221         struct io_wqe *wqe = worker->wqe;
222         struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
223
224         /*
225          * If we're not at zero, someone else is holding a brief reference
226          * to the worker. Wait for that to go away.
227          */
228         set_current_state(TASK_INTERRUPTIBLE);
229         if (!refcount_dec_and_test(&worker->ref))
230                 schedule();
231         __set_current_state(TASK_RUNNING);
232
233         preempt_disable();
234         current->flags &= ~PF_IO_WORKER;
235         if (worker->flags & IO_WORKER_F_RUNNING)
236                 atomic_dec(&acct->nr_running);
237         if (!(worker->flags & IO_WORKER_F_BOUND))
238                 atomic_dec(&wqe->wq->user->processes);
239         worker->flags = 0;
240         preempt_enable();
241
242         raw_spin_lock_irq(&wqe->lock);
243         hlist_nulls_del_rcu(&worker->nulls_node);
244         list_del_rcu(&worker->all_list);
245         if (__io_worker_unuse(wqe, worker)) {
246                 __release(&wqe->lock);
247                 raw_spin_lock_irq(&wqe->lock);
248         }
249         acct->nr_workers--;
250         raw_spin_unlock_irq(&wqe->lock);
251
252         kfree_rcu(worker, rcu);
253         if (refcount_dec_and_test(&wqe->wq->refs))
254                 complete(&wqe->wq->done);
255 }
256
257 static inline bool io_wqe_run_queue(struct io_wqe *wqe)
258         __must_hold(wqe->lock)
259 {
260         if (!wq_list_empty(&wqe->work_list) &&
261             !(wqe->flags & IO_WQE_FLAG_STALLED))
262                 return true;
263         return false;
264 }
265
266 /*
267  * Check head of free list for an available worker. If one isn't available,
268  * caller must wake up the wq manager to create one.
269  */
270 static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
271         __must_hold(RCU)
272 {
273         struct hlist_nulls_node *n;
274         struct io_worker *worker;
275
276         n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
277         if (is_a_nulls(n))
278                 return false;
279
280         worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
281         if (io_worker_get(worker)) {
282                 wake_up_process(worker->task);
283                 io_worker_release(worker);
284                 return true;
285         }
286
287         return false;
288 }
289
290 /*
291  * We need a worker. If we find a free one, we're good. If not, and we're
292  * below the max number of workers, wake up the manager to create one.
293  */
294 static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
295 {
296         bool ret;
297
298         /*
299          * Most likely an attempt to queue unbounded work on an io_wq that
300          * wasn't setup with any unbounded workers.
301          */
302         WARN_ON_ONCE(!acct->max_workers);
303
304         rcu_read_lock();
305         ret = io_wqe_activate_free_worker(wqe);
306         rcu_read_unlock();
307
308         if (!ret && acct->nr_workers < acct->max_workers)
309                 wake_up_process(wqe->wq->manager);
310 }
311
312 static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
313 {
314         struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
315
316         atomic_inc(&acct->nr_running);
317 }
318
319 static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
320         __must_hold(wqe->lock)
321 {
322         struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
323
324         if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
325                 io_wqe_wake_worker(wqe, acct);
326 }
327
328 static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
329 {
330         allow_kernel_signal(SIGINT);
331
332         current->flags |= PF_IO_WORKER;
333
334         worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
335         worker->restore_files = current->files;
336         worker->restore_nsproxy = current->nsproxy;
337         worker->restore_fs = current->fs;
338         io_wqe_inc_running(wqe, worker);
339 }
340
341 /*
342  * Worker will start processing some work. Remove it from the free list
343  * if it's currently there, and update bound/unbound accounting if needed.
344  */
345 static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
346                              struct io_wq_work *work)
347         __must_hold(wqe->lock)
348 {
349         bool worker_bound, work_bound;
350
351         if (worker->flags & IO_WORKER_F_FREE) {
352                 worker->flags &= ~IO_WORKER_F_FREE;
353                 hlist_nulls_del_init_rcu(&worker->nulls_node);
354         }
355
356         /*
357          * If worker is moving from bound to unbound (or vice versa), then
358          * ensure we update the running accounting.
359          */
360         worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
361         work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
362         if (worker_bound != work_bound) {
363                 io_wqe_dec_running(wqe, worker);
364                 if (work_bound) {
365                         worker->flags |= IO_WORKER_F_BOUND;
366                         wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
367                         wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
368                         atomic_dec(&wqe->wq->user->processes);
369                 } else {
370                         worker->flags &= ~IO_WORKER_F_BOUND;
371                         wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
372                         wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
373                         atomic_inc(&wqe->wq->user->processes);
374                 }
375                 io_wqe_inc_running(wqe, worker);
376         }
377 }
378
379 /*
380  * No work, worker going to sleep. Move to freelist, and unuse mm if we
381  * have one attached. Dropping the mm may potentially sleep, so we drop
382  * the lock in that case and return success. Since the caller has to
383  * retry the loop in that case (we changed task state), we don't regrab
384  * the lock if we return success.
385  */
386 static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
387         __must_hold(wqe->lock)
388 {
389         if (!(worker->flags & IO_WORKER_F_FREE)) {
390                 worker->flags |= IO_WORKER_F_FREE;
391                 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
392         }
393
394         return __io_worker_unuse(wqe, worker);
395 }
396
397 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
398 {
399         return work->flags >> IO_WQ_HASH_SHIFT;
400 }
401
402 static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
403         __must_hold(wqe->lock)
404 {
405         struct io_wq_work_node *node, *prev;
406         struct io_wq_work *work, *tail;
407         unsigned int hash;
408
409         wq_list_for_each(node, prev, &wqe->work_list) {
410                 work = container_of(node, struct io_wq_work, list);
411
412                 /* not hashed, can run anytime */
413                 if (!io_wq_is_hashed(work)) {
414                         wq_list_del(&wqe->work_list, node, prev);
415                         return work;
416                 }
417
418                 /* hashed, can run if not already running */
419                 hash = io_get_work_hash(work);
420                 if (!(wqe->hash_map & BIT(hash))) {
421                         wqe->hash_map |= BIT(hash);
422                         /* all items with this hash lie in [work, tail] */
423                         tail = wqe->hash_tail[hash];
424                         wqe->hash_tail[hash] = NULL;
425                         wq_list_cut(&wqe->work_list, &tail->list, prev);
426                         return work;
427                 }
428         }
429
430         return NULL;
431 }
432
433 static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
434 {
435         if (worker->mm) {
436                 kthread_unuse_mm(worker->mm);
437                 mmput(worker->mm);
438                 worker->mm = NULL;
439         }
440
441         if (mmget_not_zero(work->identity->mm)) {
442                 kthread_use_mm(work->identity->mm);
443                 worker->mm = work->identity->mm;
444                 return;
445         }
446
447         /* failed grabbing mm, ensure work gets cancelled */
448         work->flags |= IO_WQ_WORK_CANCEL;
449 }
450
451 static inline void io_wq_switch_blkcg(struct io_worker *worker,
452                                       struct io_wq_work *work)
453 {
454 #ifdef CONFIG_BLK_CGROUP
455         if (!(work->flags & IO_WQ_WORK_BLKCG))
456                 return;
457         if (work->identity->blkcg_css != worker->blkcg_css) {
458                 kthread_associate_blkcg(work->identity->blkcg_css);
459                 worker->blkcg_css = work->identity->blkcg_css;
460         }
461 #endif
462 }
463
464 static void io_wq_switch_creds(struct io_worker *worker,
465                                struct io_wq_work *work)
466 {
467         const struct cred *old_creds = override_creds(work->identity->creds);
468
469         worker->cur_creds = work->identity->creds;
470         if (worker->saved_creds)
471                 put_cred(old_creds); /* creds set by previous switch */
472         else
473                 worker->saved_creds = old_creds;
474 }
475
476 static void io_impersonate_work(struct io_worker *worker,
477                                 struct io_wq_work *work)
478 {
479         if ((work->flags & IO_WQ_WORK_FILES) &&
480             current->files != work->identity->files) {
481                 task_lock(current);
482                 current->files = work->identity->files;
483                 current->nsproxy = work->identity->nsproxy;
484                 task_unlock(current);
485         }
486         if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs)
487                 current->fs = work->identity->fs;
488         if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm)
489                 io_wq_switch_mm(worker, work);
490         if ((work->flags & IO_WQ_WORK_CREDS) &&
491             worker->cur_creds != work->identity->creds)
492                 io_wq_switch_creds(worker, work);
493         if (work->flags & IO_WQ_WORK_FSIZE)
494                 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize;
495         else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
496                 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
497         io_wq_switch_blkcg(worker, work);
498 #ifdef CONFIG_AUDIT
499         current->loginuid = work->identity->loginuid;
500         current->sessionid = work->identity->sessionid;
501 #endif
502 }
503
504 static void io_assign_current_work(struct io_worker *worker,
505                                    struct io_wq_work *work)
506 {
507         if (work) {
508                 /* flush pending signals before assigning new work */
509                 if (signal_pending(current))
510                         flush_signals(current);
511                 cond_resched();
512         }
513
514 #ifdef CONFIG_AUDIT
515         current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET);
516         current->sessionid = AUDIT_SID_UNSET;
517 #endif
518
519         spin_lock_irq(&worker->lock);
520         worker->cur_work = work;
521         spin_unlock_irq(&worker->lock);
522 }
523
524 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
525
526 static void io_worker_handle_work(struct io_worker *worker)
527         __releases(wqe->lock)
528 {
529         struct io_wqe *wqe = worker->wqe;
530         struct io_wq *wq = wqe->wq;
531
532         do {
533                 struct io_wq_work *work;
534 get_next:
535                 /*
536                  * If we got some work, mark us as busy. If we didn't, but
537                  * the list isn't empty, it means we stalled on hashed work.
538                  * Mark us stalled so we don't keep looking for work when we
539                  * can't make progress; any work completion or insertion will
540                  * clear the stalled flag.
541                  */
542                 work = io_get_next_work(wqe);
543                 if (work)
544                         __io_worker_busy(wqe, worker, work);
545                 else if (!wq_list_empty(&wqe->work_list))
546                         wqe->flags |= IO_WQE_FLAG_STALLED;
547
548                 raw_spin_unlock_irq(&wqe->lock);
549                 if (!work)
550                         break;
551                 io_assign_current_work(worker, work);
552
553                 /* handle a whole dependent link */
554                 do {
555                         struct io_wq_work *old_work, *next_hashed, *linked;
556                         unsigned int hash = io_get_work_hash(work);
557
558                         next_hashed = wq_next_work(work);
559                         io_impersonate_work(worker, work);
560                         /*
561                          * OK to set IO_WQ_WORK_CANCEL even for uncancellable
562                          * work, the worker function will do the right thing.
563                          */
564                         if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
565                                 work->flags |= IO_WQ_WORK_CANCEL;
566
567                         old_work = work;
568                         linked = wq->do_work(work);
569
570                         work = next_hashed;
571                         if (!work && linked && !io_wq_is_hashed(linked)) {
572                                 work = linked;
573                                 linked = NULL;
574                         }
575                         io_assign_current_work(worker, work);
576                         wq->free_work(old_work);
577
578                         if (linked)
579                                 io_wqe_enqueue(wqe, linked);
580
581                         if (hash != -1U && !next_hashed) {
582                                 raw_spin_lock_irq(&wqe->lock);
583                                 wqe->hash_map &= ~BIT_ULL(hash);
584                                 wqe->flags &= ~IO_WQE_FLAG_STALLED;
585                                 /* skip unnecessary unlock-lock wqe->lock */
586                                 if (!work)
587                                         goto get_next;
588                                 raw_spin_unlock_irq(&wqe->lock);
589                         }
590                 } while (work);
591
592                 raw_spin_lock_irq(&wqe->lock);
593         } while (1);
594 }
595
596 static int io_wqe_worker(void *data)
597 {
598         struct io_worker *worker = data;
599         struct io_wqe *wqe = worker->wqe;
600         struct io_wq *wq = wqe->wq;
601
602         io_worker_start(wqe, worker);
603
604         while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
605                 set_current_state(TASK_INTERRUPTIBLE);
606 loop:
607                 raw_spin_lock_irq(&wqe->lock);
608                 if (io_wqe_run_queue(wqe)) {
609                         __set_current_state(TASK_RUNNING);
610                         io_worker_handle_work(worker);
611                         goto loop;
612                 }
613                 /* drops the lock on success, retry */
614                 if (__io_worker_idle(wqe, worker)) {
615                         __release(&wqe->lock);
616                         goto loop;
617                 }
618                 raw_spin_unlock_irq(&wqe->lock);
619                 if (signal_pending(current))
620                         flush_signals(current);
621                 if (schedule_timeout(WORKER_IDLE_TIMEOUT))
622                         continue;
623                 /* timed out, exit unless we're the fixed worker */
624                 if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
625                     !(worker->flags & IO_WORKER_F_FIXED))
626                         break;
627         }
628
629         if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
630                 raw_spin_lock_irq(&wqe->lock);
631                 if (!wq_list_empty(&wqe->work_list))
632                         io_worker_handle_work(worker);
633                 else
634                         raw_spin_unlock_irq(&wqe->lock);
635         }
636
637         io_worker_exit(worker);
638         return 0;
639 }
640
641 /*
642  * Called when a worker is scheduled in. Mark us as currently running.
643  */
644 void io_wq_worker_running(struct task_struct *tsk)
645 {
646         struct io_worker *worker = kthread_data(tsk);
647         struct io_wqe *wqe = worker->wqe;
648
649         if (!(worker->flags & IO_WORKER_F_UP))
650                 return;
651         if (worker->flags & IO_WORKER_F_RUNNING)
652                 return;
653         worker->flags |= IO_WORKER_F_RUNNING;
654         io_wqe_inc_running(wqe, worker);
655 }
656
657 /*
658  * Called when a worker is going to sleep. If there are no workers currently
659  * running and we have work pending, wake up a free one or have the manager
660  * set one up.
661  */
662 void io_wq_worker_sleeping(struct task_struct *tsk)
663 {
664         struct io_worker *worker = kthread_data(tsk);
665         struct io_wqe *wqe = worker->wqe;
666
667         if (!(worker->flags & IO_WORKER_F_UP))
668                 return;
669         if (!(worker->flags & IO_WORKER_F_RUNNING))
670                 return;
671
672         worker->flags &= ~IO_WORKER_F_RUNNING;
673
674         raw_spin_lock_irq(&wqe->lock);
675         io_wqe_dec_running(wqe, worker);
676         raw_spin_unlock_irq(&wqe->lock);
677 }
678
679 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
680 {
681         struct io_wqe_acct *acct = &wqe->acct[index];
682         struct io_worker *worker;
683
684         worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
685         if (!worker)
686                 return false;
687
688         refcount_set(&worker->ref, 1);
689         worker->nulls_node.pprev = NULL;
690         worker->wqe = wqe;
691         spin_lock_init(&worker->lock);
692
693         worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
694                                 "io_wqe_worker-%d/%d", index, wqe->node);
695         if (IS_ERR(worker->task)) {
696                 kfree(worker);
697                 return false;
698         }
699         kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));
700
701         raw_spin_lock_irq(&wqe->lock);
702         hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
703         list_add_tail_rcu(&worker->all_list, &wqe->all_list);
704         worker->flags |= IO_WORKER_F_FREE;
705         if (index == IO_WQ_ACCT_BOUND)
706                 worker->flags |= IO_WORKER_F_BOUND;
707         if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
708                 worker->flags |= IO_WORKER_F_FIXED;
709         acct->nr_workers++;
710         raw_spin_unlock_irq(&wqe->lock);
711
712         if (index == IO_WQ_ACCT_UNBOUND)
713                 atomic_inc(&wq->user->processes);
714
715         refcount_inc(&wq->refs);
716         wake_up_process(worker->task);
717         return true;
718 }
719
720 static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
721         __must_hold(wqe->lock)
722 {
723         struct io_wqe_acct *acct = &wqe->acct[index];
724
725         /* if we have available workers or no work, no need */
726         if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
727                 return false;
728         return acct->nr_workers < acct->max_workers;
729 }
730
731 static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
732 {
733         send_sig(SIGINT, worker->task, 1);
734         return false;
735 }
736
737 /*
738  * Iterate the passed-in list and call the specified function for each
739  * worker that isn't exiting.
740  */
741 static bool io_wq_for_each_worker(struct io_wqe *wqe,
742                                   bool (*func)(struct io_worker *, void *),
743                                   void *data)
744 {
745         struct io_worker *worker;
746         bool ret = false;
747
748         list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
749                 if (io_worker_get(worker)) {
750                         /* no task if node is/was offline */
751                         if (worker->task)
752                                 ret = func(worker, data);
753                         io_worker_release(worker);
754                         if (ret)
755                                 break;
756                 }
757         }
758
759         return ret;
760 }
761
762 static bool io_wq_worker_wake(struct io_worker *worker, void *data)
763 {
764         wake_up_process(worker->task);
765         return false;
766 }
767
768 /*
769  * Manager thread. Tasked with creating new workers, if we need them.
770  */
771 static int io_wq_manager(void *data)
772 {
773         struct io_wq *wq = data;
774         int node;
775
776         /* create fixed workers */
777         refcount_set(&wq->refs, 1);
778         for_each_node(node) {
779                 if (!node_online(node))
780                         continue;
781                 if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
782                         continue;
783                 set_bit(IO_WQ_BIT_ERROR, &wq->state);
784                 set_bit(IO_WQ_BIT_EXIT, &wq->state);
785                 goto out;
786         }
787
788         complete(&wq->done);
789
790         while (!kthread_should_stop()) {
791                 if (current->task_works)
792                         task_work_run();
793
794                 for_each_node(node) {
795                         struct io_wqe *wqe = wq->wqes[node];
796                         bool fork_worker[2] = { false, false };
797
798                         if (!node_online(node))
799                                 continue;
800
801                         raw_spin_lock_irq(&wqe->lock);
802                         if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
803                                 fork_worker[IO_WQ_ACCT_BOUND] = true;
804                         if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
805                                 fork_worker[IO_WQ_ACCT_UNBOUND] = true;
806                         raw_spin_unlock_irq(&wqe->lock);
807                         if (fork_worker[IO_WQ_ACCT_BOUND])
808                                 create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
809                         if (fork_worker[IO_WQ_ACCT_UNBOUND])
810                                 create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
811                 }
812                 set_current_state(TASK_INTERRUPTIBLE);
813                 schedule_timeout(HZ);
814         }
815
816         if (current->task_works)
817                 task_work_run();
818
819 out:
820         if (refcount_dec_and_test(&wq->refs)) {
821                 complete(&wq->done);
822                 return 0;
823         }
824         /* if ERROR is set and we get here, we have workers to wake */
825         if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
826                 rcu_read_lock();
827                 for_each_node(node)
828                         io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
829                 rcu_read_unlock();
830         }
831         return 0;
832 }
833
834 static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
835                             struct io_wq_work *work)
836 {
837         bool free_worker;
838
839         if (!(work->flags & IO_WQ_WORK_UNBOUND))
840                 return true;
841         if (atomic_read(&acct->nr_running))
842                 return true;
843
844         rcu_read_lock();
845         free_worker = !hlist_nulls_empty(&wqe->free_list);
846         rcu_read_unlock();
847         if (free_worker)
848                 return true;
849
850         if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
851             !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
852                 return false;
853
854         return true;
855 }
856
857 static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
858 {
859         struct io_wq *wq = wqe->wq;
860
861         do {
862                 struct io_wq_work *old_work = work;
863
864                 work->flags |= IO_WQ_WORK_CANCEL;
865                 work = wq->do_work(work);
866                 wq->free_work(old_work);
867         } while (work);
868 }
869
870 static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
871 {
872         unsigned int hash;
873         struct io_wq_work *tail;
874
875         if (!io_wq_is_hashed(work)) {
876 append:
877                 wq_list_add_tail(&work->list, &wqe->work_list);
878                 return;
879         }
880
881         hash = io_get_work_hash(work);
882         tail = wqe->hash_tail[hash];
883         wqe->hash_tail[hash] = work;
884         if (!tail)
885                 goto append;
886
887         wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
888 }
889
890 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
891 {
892         struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
893         int work_flags;
894         unsigned long flags;
895
896         /*
897          * Do an early check to see if we need a new unbound worker, and if we do,
898          * if we're allowed to do so. This isn't 100% accurate as there's a
899          * gap between this check and incrementing the value, but that's OK.
900          * It's close enough to not be an issue, fork() has the same delay.
901          */
902         if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
903                 io_run_cancel(work, wqe);
904                 return;
905         }
906
907         work_flags = work->flags;
908         raw_spin_lock_irqsave(&wqe->lock, flags);
909         io_wqe_insert_work(wqe, work);
910         wqe->flags &= ~IO_WQE_FLAG_STALLED;
911         raw_spin_unlock_irqrestore(&wqe->lock, flags);
912
913         if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
914             !atomic_read(&acct->nr_running))
915                 io_wqe_wake_worker(wqe, acct);
916 }
917
918 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
919 {
920         struct io_wqe *wqe = wq->wqes[numa_node_id()];
921
922         io_wqe_enqueue(wqe, work);
923 }
924
925 /*
926  * Work items that hash to the same value will not be done in parallel.
927  * Used to limit concurrent writes, generally hashed by inode.
928  */
929 void io_wq_hash_work(struct io_wq_work *work, void *val)
930 {
931         unsigned int bit;
932
933         bit = hash_ptr(val, IO_WQ_HASH_ORDER);
934         work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
935 }
936
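/*
 * Illustrative sketch (not part of the upstream file): a submitter that
 * wants all writes against one file serialized could hash the work item
 * by a per-file key, typically the inode pointer, before queueing it.
 * The helper below and its "key" parameter are hypothetical; only
 * io_wq_hash_work() and io_wq_enqueue() above are the real entry points.
 */
static inline void io_wq_enqueue_hashed(struct io_wq *wq,
                                        struct io_wq_work *work, void *key)
{
        /* work items sharing this key never run in parallel on this wq */
        io_wq_hash_work(work, key);
        io_wq_enqueue(wq, work);
}
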
937 void io_wq_cancel_all(struct io_wq *wq)
938 {
939         int node;
940
941         set_bit(IO_WQ_BIT_CANCEL, &wq->state);
942
943         rcu_read_lock();
944         for_each_node(node) {
945                 struct io_wqe *wqe = wq->wqes[node];
946
947                 io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
948         }
949         rcu_read_unlock();
950 }
951
952 struct io_cb_cancel_data {
953         work_cancel_fn *fn;
954         void *data;
955         int nr_running;
956         int nr_pending;
957         bool cancel_all;
958 };
959
960 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
961 {
962         struct io_cb_cancel_data *match = data;
963         unsigned long flags;
964
965         /*
966          * Hold the lock to avoid ->cur_work going out of scope, as the
967          * caller may dereference the passed-in work.
968          */
969         spin_lock_irqsave(&worker->lock, flags);
970         if (worker->cur_work &&
971             !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
972             match->fn(worker->cur_work, match->data)) {
973                 send_sig(SIGINT, worker->task, 1);
974                 match->nr_running++;
975         }
976         spin_unlock_irqrestore(&worker->lock, flags);
977
978         return match->nr_running && !match->cancel_all;
979 }
980
981 static inline void io_wqe_remove_pending(struct io_wqe *wqe,
982                                          struct io_wq_work *work,
983                                          struct io_wq_work_node *prev)
984 {
985         unsigned int hash = io_get_work_hash(work);
986         struct io_wq_work *prev_work = NULL;
987
988         if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
989                 if (prev)
990                         prev_work = container_of(prev, struct io_wq_work, list);
991                 if (prev_work && io_get_work_hash(prev_work) == hash)
992                         wqe->hash_tail[hash] = prev_work;
993                 else
994                         wqe->hash_tail[hash] = NULL;
995         }
996         wq_list_del(&wqe->work_list, &work->list, prev);
997 }
998
999 static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
1000                                        struct io_cb_cancel_data *match)
1001 {
1002         struct io_wq_work_node *node, *prev;
1003         struct io_wq_work *work;
1004         unsigned long flags;
1005
1006 retry:
1007         raw_spin_lock_irqsave(&wqe->lock, flags);
1008         wq_list_for_each(node, prev, &wqe->work_list) {
1009                 work = container_of(node, struct io_wq_work, list);
1010                 if (!match->fn(work, match->data))
1011                         continue;
1012                 io_wqe_remove_pending(wqe, work, prev);
1013                 raw_spin_unlock_irqrestore(&wqe->lock, flags);
1014                 io_run_cancel(work, wqe);
1015                 match->nr_pending++;
1016                 if (!match->cancel_all)
1017                         return;
1018
1019                 /* not safe to continue after unlock */
1020                 goto retry;
1021         }
1022         raw_spin_unlock_irqrestore(&wqe->lock, flags);
1023 }
1024
1025 static void io_wqe_cancel_running_work(struct io_wqe *wqe,
1026                                        struct io_cb_cancel_data *match)
1027 {
1028         rcu_read_lock();
1029         io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
1030         rcu_read_unlock();
1031 }
1032
1033 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
1034                                   void *data, bool cancel_all)
1035 {
1036         struct io_cb_cancel_data match = {
1037                 .fn             = cancel,
1038                 .data           = data,
1039                 .cancel_all     = cancel_all,
1040         };
1041         int node;
1042
1043         /*
1044          * First check the pending list; if we're lucky we can just remove it
1045          * from there. CANCEL_OK means that the work is returned as-new;
1046          * no completion will be posted for it.
1047          */
1048         for_each_node(node) {
1049                 struct io_wqe *wqe = wq->wqes[node];
1050
1051                 io_wqe_cancel_pending_work(wqe, &match);
1052                 if (match.nr_pending && !match.cancel_all)
1053                         return IO_WQ_CANCEL_OK;
1054         }
1055
1056         /*
1057          * Now check if a free (going busy) or busy worker has the work
1058          * currently running. If we find it there, we'll return CANCEL_RUNNING
1059          * as an indication that we attempted to signal cancellation. The
1060          * completion will run normally in this case.
1061          */
1062         for_each_node(node) {
1063                 struct io_wqe *wqe = wq->wqes[node];
1064
1065                 io_wqe_cancel_running_work(wqe, &match);
1066                 if (match.nr_running && !match.cancel_all)
1067                         return IO_WQ_CANCEL_RUNNING;
1068         }
1069
1070         if (match.nr_running)
1071                 return IO_WQ_CANCEL_RUNNING;
1072         if (match.nr_pending)
1073                 return IO_WQ_CANCEL_OK;
1074         return IO_WQ_CANCEL_NOTFOUND;
1075 }
1076
1077 static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
1078 {
1079         return work == data;
1080 }
1081
1082 enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
1083 {
1084         return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
1085 }
1086
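/*
 * Sketch only (not upstream): cancelling everything that is queued or
 * running can be expressed through io_wq_cancel_cb() with a match
 * function that accepts every item and the cancel_all flag set. The
 * two helpers and their names below are hypothetical.
 */
static bool io_wq_match_all(struct io_wq_work *work, void *data)
{
        return true;
}

static inline enum io_wq_cancel io_wq_cancel_everything(struct io_wq *wq)
{
        /* walks the pending lists first, then signals any running workers */
        return io_wq_cancel_cb(wq, io_wq_match_all, NULL, true);
}
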
1087 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
1088 {
1089         int ret = -ENOMEM, node;
1090         struct io_wq *wq;
1091
1092         if (WARN_ON_ONCE(!data->free_work || !data->do_work))
1093                 return ERR_PTR(-EINVAL);
1094
1095         wq = kzalloc(sizeof(*wq), GFP_KERNEL);
1096         if (!wq)
1097                 return ERR_PTR(-ENOMEM);
1098
1099         wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
1100         if (!wq->wqes)
1101                 goto err_wq;
1102
1103         ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1104         if (ret)
1105                 goto err_wqes;
1106
1107         wq->free_work = data->free_work;
1108         wq->do_work = data->do_work;
1109
1110         /* caller must already hold a reference to this */
1111         wq->user = data->user;
1112
1113         ret = -ENOMEM;
1114         for_each_node(node) {
1115                 struct io_wqe *wqe;
1116                 int alloc_node = node;
1117
1118                 if (!node_online(alloc_node))
1119                         alloc_node = NUMA_NO_NODE;
1120                 wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
1121                 if (!wqe)
1122                         goto err;
1123                 wq->wqes[node] = wqe;
1124                 wqe->node = alloc_node;
1125                 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
1126                 atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
1127                 if (wq->user) {
1128                         wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
1129                                         task_rlimit(current, RLIMIT_NPROC);
1130                 }
1131                 atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
1132                 wqe->wq = wq;
1133                 raw_spin_lock_init(&wqe->lock);
1134                 INIT_WQ_LIST(&wqe->work_list);
1135                 INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
1136                 INIT_LIST_HEAD(&wqe->all_list);
1137         }
1138
1139         init_completion(&wq->done);
1140
1141         wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
1142         if (!IS_ERR(wq->manager)) {
1143                 wake_up_process(wq->manager);
1144                 wait_for_completion(&wq->done);
1145                 if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
1146                         ret = -ENOMEM;
1147                         goto err;
1148                 }
1149                 refcount_set(&wq->use_refs, 1);
1150                 reinit_completion(&wq->done);
1151                 return wq;
1152         }
1153
1154         ret = PTR_ERR(wq->manager);
1155         complete(&wq->done);
1156 err:
1157         cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1158         for_each_node(node)
1159                 kfree(wq->wqes[node]);
1160 err_wqes:
1161         kfree(wq->wqes);
1162 err_wq:
1163         kfree(wq);
1164         return ERR_PTR(ret);
1165 }
1166
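/*
 * Setup sketch (illustrative, not part of the upstream file): a
 * hypothetical user of the pool supplies the two callbacks and the
 * bounded worker count. The callback bodies and all "example_" names
 * are placeholders; struct io_wq_data, io_wq_create() and
 * io_wq_destroy() are the real interfaces used here.
 */
static struct io_wq_work *example_do_work(struct io_wq_work *work)
{
        /* process the item; return a linked item to run next, or NULL */
        return NULL;
}

static void example_free_work(struct io_wq_work *work)
{
        /* release whatever example_do_work() left behind */
}

static int __maybe_unused example_start_pool(void)
{
        struct io_wq_data data = {
                .do_work        = example_do_work,
                .free_work      = example_free_work,
                /* .user left NULL: this sketch only uses bounded workers */
        };
        struct io_wq *wq;

        /* up to 4 bounded workers per NUMA node */
        wq = io_wq_create(4, &data);
        if (IS_ERR(wq))
                return PTR_ERR(wq);

        io_wq_destroy(wq);
        return 0;
}
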
1167 bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
1168 {
1169         if (data->free_work != wq->free_work || data->do_work != wq->do_work)
1170                 return false;
1171
1172         return refcount_inc_not_zero(&wq->use_refs);
1173 }
1174
1175 static void __io_wq_destroy(struct io_wq *wq)
1176 {
1177         int node;
1178
1179         cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1180
1181         set_bit(IO_WQ_BIT_EXIT, &wq->state);
1182         if (wq->manager)
1183                 kthread_stop(wq->manager);
1184
1185         rcu_read_lock();
1186         for_each_node(node)
1187                 io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
1188         rcu_read_unlock();
1189
1190         wait_for_completion(&wq->done);
1191
1192         for_each_node(node)
1193                 kfree(wq->wqes[node]);
1194         kfree(wq->wqes);
1195         kfree(wq);
1196 }
1197
1198 void io_wq_destroy(struct io_wq *wq)
1199 {
1200         if (refcount_dec_and_test(&wq->use_refs))
1201                 __io_wq_destroy(wq);
1202 }
1203
1204 struct task_struct *io_wq_get_task(struct io_wq *wq)
1205 {
1206         return wq->manager;
1207 }
1208
1209 static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
1210 {
1211         struct task_struct *task = worker->task;
1212         struct rq_flags rf;
1213         struct rq *rq;
1214
1215         rq = task_rq_lock(task, &rf);
1216         do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
1217         task->flags |= PF_NO_SETAFFINITY;
1218         task_rq_unlock(rq, task, &rf);
1219         return false;
1220 }
1221
1222 static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
1223 {
1224         struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1225         int i;
1226
1227         rcu_read_lock();
1228         for_each_node(i)
1229                 io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
1230         rcu_read_unlock();
1231         return 0;
1232 }
1233
1234 static __init int io_wq_init(void)
1235 {
1236         int ret;
1237
1238         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
1239                                         io_wq_cpu_online, NULL);
1240         if (ret < 0)
1241                 return ret;
1242         io_wq_online = ret;
1243         return 0;
1244 }
1245 subsys_initcall(io_wq_init);