1 // SPDX-License-Identifier: GPL-2.0-only
3 * kernel/workqueue.c - generic async execution with shared worker pool
5 * Copyright (C) 2002 Ingo Molnar
7 * Derived from the taskqueue/keventd code by:
8 * David Woodhouse <dwmw2@infradead.org>
10 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
11 * Theodore Ts'o <tytso@mit.edu>
13 * Made to use alloc_percpu by Christoph Lameter.
15 * Copyright (C) 2010 SUSE Linux Products GmbH
16 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
18 * This is the generic async execution mechanism. Work items are
19 * executed in process context. The worker pool is shared and
20 * automatically managed. There are two worker pools for each CPU (one for
21 * normal work items and the other for high priority ones) and some extra
22 * pools for workqueues which are not bound to any specific CPU - the
23 * number of these backing pools is dynamic.
25 * Please read Documentation/core-api/workqueue.rst for details.
28 #include <linux/export.h>
29 #include <linux/kernel.h>
30 #include <linux/sched.h>
31 #include <linux/init.h>
32 #include <linux/signal.h>
33 #include <linux/completion.h>
34 #include <linux/workqueue.h>
35 #include <linux/slab.h>
36 #include <linux/cpu.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/hardirq.h>
40 #include <linux/mempolicy.h>
41 #include <linux/freezer.h>
42 #include <linux/debug_locks.h>
43 #include <linux/lockdep.h>
44 #include <linux/idr.h>
45 #include <linux/jhash.h>
46 #include <linux/hashtable.h>
47 #include <linux/rculist.h>
48 #include <linux/nodemask.h>
49 #include <linux/moduleparam.h>
50 #include <linux/uaccess.h>
51 #include <linux/sched/isolation.h>
52 #include <linux/sched/debug.h>
53 #include <linux/nmi.h>
54 #include <linux/kvm_para.h>
56 #include "workqueue_internal.h"
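/*
 * Illustrative usage sketch (not part of the original file): the minimal
 * client-side pattern served by the machinery below.  example_fn and
 * example_work are hypothetical names.
 *
 *	static void example_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context on a kworker\n");
 *	}
 *	static DECLARE_WORK(example_work, example_fn);
 *
 *	// e.g. from an interrupt handler: defer the slow part to a kworker
 *	schedule_work(&example_work);
 */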
62 * A bound pool is either associated or disassociated with its CPU.
63 * While associated (!DISASSOCIATED), all workers are bound to the
64 * CPU and none has %WORKER_UNBOUND set and concurrency management
65 * in effect.
67 * While DISASSOCIATED, the cpu may be offline and all workers have
68 * %WORKER_UNBOUND set and concurrency management disabled, and may
69 * be executing on any CPU. The pool behaves as an unbound one.
71 * Note that DISASSOCIATED should be flipped only while holding
72 * wq_pool_attach_mutex to avoid changing binding state while
73 * worker_attach_to_pool() is in progress.
75 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
76 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
79 WORKER_DIE = 1 << 1, /* die die die */
80 WORKER_IDLE = 1 << 2, /* is idle */
81 WORKER_PREP = 1 << 3, /* preparing to run works */
82 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
83 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
84 WORKER_REBOUND = 1 << 8, /* worker was rebound */
86 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
87 WORKER_UNBOUND | WORKER_REBOUND,
89 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
91 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
92 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
94 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
95 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
97 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
98 /* call for help after 10ms
99    (min two ticks) */
100 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
101 CREATE_COOLDOWN = HZ, /* time to breathe after fail */
104 * Rescue workers are used only in emergencies and shared by
105 * all cpus. Give MIN_NICE.
107 RESCUER_NICE_LEVEL = MIN_NICE,
108 HIGHPRI_NICE_LEVEL = MIN_NICE,
114 * Structure fields follow one of the following exclusion rules.
116 * I: Modifiable by initialization/destruction paths and read-only for
119 * P: Preemption protected. Disabling preemption is enough and should
120 * only be modified and accessed from the local cpu.
122 * L: pool->lock protected. Access with pool->lock held.
124 * X: During normal operation, modification requires pool->lock and should
125 * be done only from local cpu. Either disabling preemption on local
126 * cpu or grabbing pool->lock is enough for read access. If
127 * POOL_DISASSOCIATED is set, it's identical to L.
129 * A: wq_pool_attach_mutex protected.
131 * PL: wq_pool_mutex protected.
133 * PR: wq_pool_mutex protected for writes. RCU protected for reads.
135 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
137 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
138 *      RCU for reads.
140 * WQ: wq->mutex protected.
142 * WR: wq->mutex protected for writes. RCU protected for reads.
144 * MD: wq_mayday_lock protected.
146 * WD: Used internally by the watchdog.
149 /* struct worker is defined in workqueue_internal.h */
152 raw_spinlock_t lock; /* the pool lock */
153 int cpu; /* I: the associated cpu */
154 int node; /* I: the associated node ID */
155 int id; /* I: pool ID */
156 unsigned int flags; /* X: flags */
158 unsigned long watchdog_ts; /* L: watchdog timestamp */
159 bool cpu_stall; /* WD: stalled cpu bound pool */
162 * The counter is incremented in a process context on the associated CPU
163 * w/ preemption disabled, and decremented or reset in the same context
164 * but w/ pool->lock held. The readers grab pool->lock and are
165 * guaranteed to see whether the counter has reached zero.
166 */
167 int nr_running;
169 struct list_head worklist; /* L: list of pending works */
171 int nr_workers; /* L: total number of workers */
172 int nr_idle; /* L: currently idle workers */
174 struct list_head idle_list; /* L: list of idle workers */
175 struct timer_list idle_timer; /* L: worker idle timeout */
176 struct work_struct idle_cull_work; /* L: worker idle cleanup */
178 struct timer_list mayday_timer; /* L: SOS timer for workers */
180 /* a worker is either on busy_hash or idle_list, or the manager */
181 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
182 /* L: hash of busy workers */
184 struct worker *manager; /* L: purely informational */
185 struct list_head workers; /* A: attached workers */
186 struct list_head dying_workers; /* A: workers about to die */
187 struct completion *detach_completion; /* all workers detached */
189 struct ida worker_ida; /* worker IDs for task name */
191 struct workqueue_attrs *attrs; /* I: worker attributes */
192 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
193 int refcnt; /* PL: refcnt for unbound pools */
196 * Destruction of pool is RCU protected to allow dereferences
197 * from get_work_pool().
203 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
204 * of work_struct->data are used for flags and the remaining high bits
205 * point to the pwq; thus, pwqs need to be aligned at two's power of the
206 * number of flag bits.
208 struct pool_workqueue {
209 struct worker_pool *pool; /* I: the associated pool */
210 struct workqueue_struct *wq; /* I: the owning workqueue */
211 int work_color; /* L: current color */
212 int flush_color; /* L: flushing color */
213 int refcnt; /* L: reference count */
214 int nr_in_flight[WORK_NR_COLORS];
215 /* L: nr of in_flight works */
218 * nr_active management and WORK_STRUCT_INACTIVE:
220 * When pwq->nr_active >= max_active, new work item is queued to
221 * pwq->inactive_works instead of pool->worklist and marked with
222 * WORK_STRUCT_INACTIVE.
224 * All work items marked with WORK_STRUCT_INACTIVE do not participate
225 * in pwq->nr_active and all work items in pwq->inactive_works are
226 * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
227 * work items are in pwq->inactive_works. Some of them are ready to
228 * run in pool->worklist or worker->scheduled. Those work items are
229 * only struct wq_barrier which is used for flush_work() and should
230 * not participate in pwq->nr_active. For non-barrier work item, it
231 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
233 int nr_active; /* L: nr of active works */
234 int max_active; /* L: max active works */
235 struct list_head inactive_works; /* L: inactive works */
236 struct list_head pwqs_node; /* WR: node on wq->pwqs */
237 struct list_head mayday_node; /* MD: node on wq->maydays */
240 * Release of unbound pwq is punted to system_wq. See put_pwq()
241 * and pwq_unbound_release_workfn() for details. pool_workqueue
242 * itself is also RCU protected so that the first pwq can be
243 * determined without grabbing wq->mutex.
245 struct work_struct unbound_release_work;
247 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
250 * Structure used to wait for workqueue flush.
253 struct list_head list; /* WQ: list of flushers */
254 int flush_color; /* WQ: flush color waiting for */
255 struct completion done; /* flush completion */
261 * The externally visible workqueue. It relays the issued work items to
262 * the appropriate worker_pool through its pool_workqueues.
264 struct workqueue_struct {
265 struct list_head pwqs; /* WR: all pwqs of this wq */
266 struct list_head list; /* PR: list of all workqueues */
268 struct mutex mutex; /* protects this wq */
269 int work_color; /* WQ: current work color */
270 int flush_color; /* WQ: current flush color */
271 atomic_t nr_pwqs_to_flush; /* flush in progress */
272 struct wq_flusher *first_flusher; /* WQ: first flusher */
273 struct list_head flusher_queue; /* WQ: flush waiters */
274 struct list_head flusher_overflow; /* WQ: flush overflow list */
276 struct list_head maydays; /* MD: pwqs requesting rescue */
277 struct worker *rescuer; /* MD: rescue worker */
279 int nr_drainers; /* WQ: drain in progress */
280 int saved_max_active; /* WQ: saved pwq max_active */
282 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
283 struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
286 struct wq_device *wq_dev; /* I: for sysfs interface */
288 #ifdef CONFIG_LOCKDEP
290 struct lock_class_key key;
291 struct lockdep_map lockdep_map;
293 char name[WQ_NAME_LEN]; /* I: workqueue name */
296 * Destruction of workqueue_struct is RCU protected to allow walking
297 * the workqueues list without grabbing wq_pool_mutex.
298 * This is used to dump all workqueues from sysrq.
302 /* hot fields used during command issue, aligned to cacheline */
303 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
304 struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
305 struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
308 static struct kmem_cache *pwq_cache;
310 static cpumask_var_t *wq_numa_possible_cpumask;
311 /* possible CPUs of each node */
313 static bool wq_disable_numa;
314 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
316 /* see the comment above the definition of WQ_POWER_EFFICIENT */
317 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
318 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
320 static bool wq_online; /* can kworkers be created yet? */
322 static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
324 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
325 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
327 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
328 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
329 static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
330 /* wait for manager to go away */
331 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
333 static LIST_HEAD(workqueues); /* PR: list of all workqueues */
334 static bool workqueue_freezing; /* PL: have wqs started freezing? */
336 /* PL&A: allowable cpus for unbound wqs and work items */
337 static cpumask_var_t wq_unbound_cpumask;
339 /* CPU where unbound work was last round robin scheduled from this CPU */
340 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
343 * Local execution of unbound work items is no longer guaranteed. The
344 * following always forces round-robin CPU selection on unbound work items
345 * to uncover usages which depend on it.
347 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
348 static bool wq_debug_force_rr_cpu = true;
350 static bool wq_debug_force_rr_cpu = false;
352 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
354 /* the per-cpu worker pools */
355 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
357 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
359 /* PL: hash of all unbound pools keyed by pool->attrs */
360 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
362 /* I: attributes used when instantiating standard unbound pools on demand */
363 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
365 /* I: attributes used when instantiating ordered pools on demand */
366 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
368 struct workqueue_struct *system_wq __read_mostly;
369 EXPORT_SYMBOL(system_wq);
370 struct workqueue_struct *system_highpri_wq __read_mostly;
371 EXPORT_SYMBOL_GPL(system_highpri_wq);
372 struct workqueue_struct *system_long_wq __read_mostly;
373 EXPORT_SYMBOL_GPL(system_long_wq);
374 struct workqueue_struct *system_unbound_wq __read_mostly;
375 EXPORT_SYMBOL_GPL(system_unbound_wq);
376 struct workqueue_struct *system_freezable_wq __read_mostly;
377 EXPORT_SYMBOL_GPL(system_freezable_wq);
378 struct workqueue_struct *system_power_efficient_wq __read_mostly;
379 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
380 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
381 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
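/*
 * Illustrative sketch (hypothetical caller): choosing among the system
 * workqueues declared above.  &example_work is assumed to be an already
 * initialized work_struct.
 *
 *	queue_work(system_wq, &example_work);		// short, per-cpu work
 *	queue_work(system_highpri_wq, &example_work);	// latency-sensitive work
 *	queue_work(system_long_wq, &example_work);	// may run for a long time
 *	queue_work(system_unbound_wq, &example_work);	// no CPU locality needed
 */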
383 static int worker_thread(void *__worker);
384 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
385 static void show_pwq(struct pool_workqueue *pwq);
386 static void show_one_worker_pool(struct worker_pool *pool);
388 #define CREATE_TRACE_POINTS
389 #include <trace/events/workqueue.h>
391 #define assert_rcu_or_pool_mutex() \
392 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
393 !lockdep_is_held(&wq_pool_mutex), \
394 "RCU or wq_pool_mutex should be held")
396 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
397 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
398 !lockdep_is_held(&wq->mutex) && \
399 !lockdep_is_held(&wq_pool_mutex), \
400 "RCU, wq->mutex or wq_pool_mutex should be held")
402 #define for_each_cpu_worker_pool(pool, cpu) \
403 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
404 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
405 (pool)++)
408 * for_each_pool - iterate through all worker_pools in the system
409 * @pool: iteration cursor
410 * @pi: integer used for iteration
412 * This must be called either with wq_pool_mutex held or RCU read
413 * locked. If the pool needs to be used beyond the locking in effect, the
414 * caller is responsible for guaranteeing that the pool stays online.
416 * The if/else clause exists only for the lockdep assertion and can be
417 * ignored as long as the locking is correct.
418 */
419 #define for_each_pool(pool, pi) \
420 idr_for_each_entry(&worker_pool_idr, pool, pi) \
421 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
422 else
425 * for_each_pool_worker - iterate through all workers of a worker_pool
426 * @worker: iteration cursor
427 * @pool: worker_pool to iterate workers of
429 * This must be called with wq_pool_attach_mutex.
431 * The if/else clause exists only for the lockdep assertion and can be
432 * ignored as long as the locking is correct.
433 */
434 #define for_each_pool_worker(worker, pool) \
435 list_for_each_entry((worker), &(pool)->workers, node) \
436 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
437 else
440 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
441 * @pwq: iteration cursor
442 * @wq: the target workqueue
444 * This must be called either with wq->mutex held or RCU read locked.
445 * If the pwq needs to be used beyond the locking in effect, the caller is
446 * responsible for guaranteeing that the pwq stays online.
448 * The if/else clause exists only for the lockdep assertion and can be
449 * ignored as long as the locking is correct.
450 */
451 #define for_each_pwq(pwq, wq) \
452 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
453 lockdep_is_held(&(wq->mutex)))
455 #ifdef CONFIG_DEBUG_OBJECTS_WORK
457 static const struct debug_obj_descr work_debug_descr;
459 static void *work_debug_hint(void *addr)
461 return ((struct work_struct *) addr)->func;
464 static bool work_is_static_object(void *addr)
466 struct work_struct *work = addr;
468 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
472 * fixup_init is called when:
473 * - an active object is initialized
475 static bool work_fixup_init(void *addr, enum debug_obj_state state)
477 struct work_struct *work = addr;
480 case ODEBUG_STATE_ACTIVE:
481 cancel_work_sync(work);
482 debug_object_init(work, &work_debug_descr);
490 * fixup_free is called when:
491 * - an active object is freed
493 static bool work_fixup_free(void *addr, enum debug_obj_state state)
495 struct work_struct *work = addr;
498 case ODEBUG_STATE_ACTIVE:
499 cancel_work_sync(work);
500 debug_object_free(work, &work_debug_descr);
507 static const struct debug_obj_descr work_debug_descr = {
508 .name = "work_struct",
509 .debug_hint = work_debug_hint,
510 .is_static_object = work_is_static_object,
511 .fixup_init = work_fixup_init,
512 .fixup_free = work_fixup_free,
515 static inline void debug_work_activate(struct work_struct *work)
517 debug_object_activate(work, &work_debug_descr);
520 static inline void debug_work_deactivate(struct work_struct *work)
522 debug_object_deactivate(work, &work_debug_descr);
525 void __init_work(struct work_struct *work, int onstack)
528 debug_object_init_on_stack(work, &work_debug_descr);
530 debug_object_init(work, &work_debug_descr);
532 EXPORT_SYMBOL_GPL(__init_work);
534 void destroy_work_on_stack(struct work_struct *work)
536 debug_object_free(work, &work_debug_descr);
538 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
540 void destroy_delayed_work_on_stack(struct delayed_work *work)
542 destroy_timer_on_stack(&work->timer);
543 debug_object_free(&work->work, &work_debug_descr);
545 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
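/*
 * Illustrative on-stack usage sketch (hypothetical caller, example_fn
 * assumed): the *_ONSTACK initializers pair with the destroy helpers above
 * when CONFIG_DEBUG_OBJECTS_WORK is enabled.
 *
 *	struct delayed_work dwork;
 *
 *	INIT_DELAYED_WORK_ONSTACK(&dwork, example_fn);
 *	schedule_delayed_work(&dwork, HZ);
 *	...
 *	flush_delayed_work(&dwork);
 *	destroy_delayed_work_on_stack(&dwork);
 */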
548 static inline void debug_work_activate(struct work_struct *work) { }
549 static inline void debug_work_deactivate(struct work_struct *work) { }
553 * worker_pool_assign_id - allocate ID and assign it to @pool
554 * @pool: the pool pointer of interest
556 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
557 * successfully, -errno on failure.
559 static int worker_pool_assign_id(struct worker_pool *pool)
563 lockdep_assert_held(&wq_pool_mutex);
565 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
575 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
576 * @wq: the target workqueue
579 * This must be called with any of wq_pool_mutex, wq->mutex or RCU
580 * read locked.
581 * If the pwq needs to be used beyond the locking in effect, the caller is
582 * responsible for guaranteeing that the pwq stays online.
584 * Return: The unbound pool_workqueue for @node.
586 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
589 assert_rcu_or_wq_mutex_or_pool_mutex(wq);
592 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
593 * delayed item is pending. The plan is to keep CPU -> NODE
594 * mapping valid and stable across CPU on/offlines. Once that
595 * happens, this workaround can be removed.
597 if (unlikely(node == NUMA_NO_NODE))
598 return wq->dfl_pwq;
600 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
603 static unsigned int work_color_to_flags(int color)
605 return color << WORK_STRUCT_COLOR_SHIFT;
608 static int get_work_color(unsigned long work_data)
610 return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
611 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
614 static int work_next_color(int color)
616 return (color + 1) % WORK_NR_COLORS;
620 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
621 * contain the pointer to the queued pwq. Once execution starts, the flag
622 * is cleared and the high bits contain OFFQ flags and pool ID.
624 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
625 * and clear_work_data() can be used to set the pwq, pool or clear
626 * work->data. These functions should only be called while the work is
627 * owned - ie. while the PENDING bit is set.
629 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
630 * corresponding to a work. Pool is available once the work has been
631 * queued anywhere after initialization until it is sync canceled. pwq is
632 * available only while the work item is queued.
634 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
635 * canceled. While being canceled, a work item may have its PENDING set
636 * but stay off timer and worklist for arbitrarily long and nobody should
637 * try to steal the PENDING bit.
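 *
 * Illustrative layout sketch (not authoritative; the exact bit positions
 * come from the WORK_STRUCT_* and WORK_OFFQ_* definitions in
 * linux/workqueue.h):
 *
 *   queued:    data = pwq pointer | WORK_STRUCT_PWQ | WORK_STRUCT_PENDING | ...
 *   off queue: data = pool ID << WORK_OFFQ_POOL_SHIFT | OFFQ flags | flags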
639 static inline void set_work_data(struct work_struct *work, unsigned long data,
642 WARN_ON_ONCE(!work_pending(work));
643 atomic_long_set(&work->data, data | flags | work_static(work));
646 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
647 unsigned long extra_flags)
649 set_work_data(work, (unsigned long)pwq,
650 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
653 static void set_work_pool_and_keep_pending(struct work_struct *work,
656 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
657 WORK_STRUCT_PENDING);
660 static void set_work_pool_and_clear_pending(struct work_struct *work,
664 * The following wmb is paired with the implied mb in
665 * test_and_set_bit(PENDING) and ensures all updates to @work made
666 * here are visible to and precede any updates by the next PENDING
667 * owner.
668 */
669 smp_wmb();
670 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
672 * The following mb guarantees that previous clear of a PENDING bit
673 * will not be reordered with any speculative LOADS or STORES from
674 * work->current_func, which is executed afterwards. This possible
675 * reordering can lead to a missed execution on attempt to queue
676 * the same @work. E.g. consider this case:
678 *   CPU#0                            CPU#1
679 *   ----------------------------     --------------------------------
680 *
681 * 1  STORE event_indicated
682 * 2  queue_work_on() {
683 * 3    test_and_set_bit(PENDING)
684 * 4  }                                set_..._and_clear_pending() {
685 * 5                                     set_work_data() # clear bit
686 * 6                                     smp_mb()
687 * 7                                  work->current_func() {
688 * 8                                     LOAD event_indicated
689 *                                    }
691 * Without an explicit full barrier speculative LOAD on line 8 can
692 * be executed before CPU#0 does STORE on line 1. If that happens,
693 * CPU#0 observes the PENDING bit is still set and new execution of
694 * a @work is not queued in the hope that CPU#1 will eventually
695 * finish the queued @work. Meanwhile CPU#1 does not see
696 * event_indicated is set, because speculative LOAD was executed
697 * before actual STORE.
698 */
699 smp_mb();
702 static void clear_work_data(struct work_struct *work)
704 smp_wmb(); /* see set_work_pool_and_clear_pending() */
705 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
708 static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
710 return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
713 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
715 unsigned long data = atomic_long_read(&work->data);
717 if (data & WORK_STRUCT_PWQ)
718 return work_struct_pwq(data);
724 * get_work_pool - return the worker_pool a given work was associated with
725 * @work: the work item of interest
727 * Pools are created and destroyed under wq_pool_mutex, and allow read
728 * access under RCU read lock. As such, this function should be
729 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
731 * All fields of the returned pool are accessible as long as the above
732 * mentioned locking is in effect. If the returned pool needs to be used
733 * beyond the critical section, the caller is responsible for ensuring the
734 * returned pool is and stays online.
736 * Return: The worker_pool @work was last associated with. %NULL if none.
738 static struct worker_pool *get_work_pool(struct work_struct *work)
740 unsigned long data = atomic_long_read(&work->data);
743 assert_rcu_or_pool_mutex();
745 if (data & WORK_STRUCT_PWQ)
746 return work_struct_pwq(data)->pool;
748 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
749 if (pool_id == WORK_OFFQ_POOL_NONE)
752 return idr_find(&worker_pool_idr, pool_id);
756 * get_work_pool_id - return the worker pool ID a given work is associated with
757 * @work: the work item of interest
759 * Return: The worker_pool ID @work was last associated with.
760 * %WORK_OFFQ_POOL_NONE if none.
762 static int get_work_pool_id(struct work_struct *work)
764 unsigned long data = atomic_long_read(&work->data);
766 if (data & WORK_STRUCT_PWQ)
767 return work_struct_pwq(data)->pool->id;
769 return data >> WORK_OFFQ_POOL_SHIFT;
772 static void mark_work_canceling(struct work_struct *work)
774 unsigned long pool_id = get_work_pool_id(work);
776 pool_id <<= WORK_OFFQ_POOL_SHIFT;
777 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
780 static bool work_is_canceling(struct work_struct *work)
782 unsigned long data = atomic_long_read(&work->data);
784 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
788 * Policy functions. These define the policies on how the global worker
789 * pools are managed. Unless noted otherwise, these functions assume that
790 * they're being called with pool->lock held.
793 static bool __need_more_worker(struct worker_pool *pool)
795 return !pool->nr_running;
799 * Need to wake up a worker? Called from anything but currently
800 * running workers.
802 * Note that, because unbound workers never contribute to nr_running, this
803 * function will always return %true for unbound pools as long as the
804 * worklist isn't empty.
806 static bool need_more_worker(struct worker_pool *pool)
808 return !list_empty(&pool->worklist) && __need_more_worker(pool);
811 /* Can I start working? Called from busy but !running workers. */
812 static bool may_start_working(struct worker_pool *pool)
814 return pool->nr_idle;
817 /* Do I need to keep working? Called from currently running workers. */
818 static bool keep_working(struct worker_pool *pool)
820 return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
823 /* Do we need a new worker? Called from manager. */
824 static bool need_to_create_worker(struct worker_pool *pool)
826 return need_more_worker(pool) && !may_start_working(pool);
829 /* Do we have too many workers and should some go away? */
830 static bool too_many_workers(struct worker_pool *pool)
832 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
833 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
834 int nr_busy = pool->nr_workers - nr_idle;
836 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
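/*
 * Illustrative numbers (not from the original source): with
 * MAX_IDLE_WORKERS_RATIO == 4, a pool with nr_idle == 6 and nr_busy == 16
 * satisfies (6 - 2) * 4 >= 16 and is considered to have too many workers,
 * while a pool with nr_idle <= 2 never does, regardless of nr_busy.
 */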
843 /* Return the first idle worker. Called with pool->lock held. */
844 static struct worker *first_idle_worker(struct worker_pool *pool)
846 if (unlikely(list_empty(&pool->idle_list)))
847 return NULL;
849 return list_first_entry(&pool->idle_list, struct worker, entry);
853 * wake_up_worker - wake up an idle worker
854 * @pool: worker pool to wake worker from
856 * Wake up the first idle worker of @pool.
859 * raw_spin_lock_irq(pool->lock).
861 static void wake_up_worker(struct worker_pool *pool)
863 struct worker *worker = first_idle_worker(pool);
865 if (likely(worker))
866 wake_up_process(worker->task);
870 * wq_worker_running - a worker is running again
871 * @task: task waking up
873 * This function is called when a worker returns from schedule()
875 void wq_worker_running(struct task_struct *task)
877 struct worker *worker = kthread_data(task);
879 if (!worker->sleeping)
883 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
884 * and the nr_running increment below, we may ruin the nr_running reset
885 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
886 * pool. Protect against such race.
889 if (!(worker->flags & WORKER_NOT_RUNNING))
890 worker->pool->nr_running++;
892 worker->sleeping = 0;
896 * wq_worker_sleeping - a worker is going to sleep
897 * @task: task going to sleep
899 * This function is called from schedule() when a busy worker is
900 * going to sleep.
901 */
902 void wq_worker_sleeping(struct task_struct *task)
904 struct worker *worker = kthread_data(task);
905 struct worker_pool *pool;
908 * Rescuers, which may not have all the fields set up like normal
909 * workers, also reach here, let's not access anything before
910 * checking NOT_RUNNING.
912 if (worker->flags & WORKER_NOT_RUNNING)
917 /* Return if preempted before wq_worker_running() was reached */
918 if (worker->sleeping)
921 worker->sleeping = 1;
922 raw_spin_lock_irq(&pool->lock);
925 * Recheck in case unbind_workers() preempted us. We don't
926 * want to decrement nr_running after the worker is unbound
927 * and nr_running has been reset.
929 if (worker->flags & WORKER_NOT_RUNNING) {
930 raw_spin_unlock_irq(&pool->lock);
935 if (need_more_worker(pool))
936 wake_up_worker(pool);
937 raw_spin_unlock_irq(&pool->lock);
941 * wq_worker_last_func - retrieve worker's last work function
942 * @task: Task to retrieve last work function of.
944 * Determine the last function a worker executed. This is called from
945 * the scheduler to get a worker's last known identity.
948 * raw_spin_lock_irq(rq->lock)
950 * This function is called during schedule() when a kworker is going
951 * to sleep. It's used by psi to identify aggregation workers during
952 * dequeuing, to allow periodic aggregation to shut-off when that
953 * worker is the last task in the system or cgroup to go to sleep.
955 * As this function doesn't involve any workqueue-related locking, it
956 * only returns stable values when called from inside the scheduler's
957 * queuing and dequeuing paths, when @task, which must be a kworker,
958 * is guaranteed to not be processing any works.
961 * The last work function %current executed as a worker, NULL if it
962 * hasn't executed any work yet.
964 work_func_t wq_worker_last_func(struct task_struct *task)
966 struct worker *worker = kthread_data(task);
968 return worker->last_func;
972 * worker_set_flags - set worker flags and adjust nr_running accordingly
974 * @flags: flags to set
976 * Set @flags in @worker->flags and adjust nr_running accordingly.
979 * raw_spin_lock_irq(pool->lock)
981 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
983 struct worker_pool *pool = worker->pool;
985 WARN_ON_ONCE(worker->task != current);
987 /* If transitioning into NOT_RUNNING, adjust nr_running. */
988 if ((flags & WORKER_NOT_RUNNING) &&
989 !(worker->flags & WORKER_NOT_RUNNING)) {
990 pool->nr_running--;
991 }
993 worker->flags |= flags;
997 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
999 * @flags: flags to clear
1001 * Clear @flags in @worker->flags and adjust nr_running accordingly.
1004 * raw_spin_lock_irq(pool->lock)
1006 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
1008 struct worker_pool *pool = worker->pool;
1009 unsigned int oflags = worker->flags;
1011 WARN_ON_ONCE(worker->task != current);
1013 worker->flags &= ~flags;
1016 * If transitioning out of NOT_RUNNING, increment nr_running. Note
1017 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask
1018 * of multiple flags, not a single flag.
1020 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
1021 if (!(worker->flags & WORKER_NOT_RUNNING))
1022 pool->nr_running++;
1026 * find_worker_executing_work - find worker which is executing a work
1027 * @pool: pool of interest
1028 * @work: work to find worker for
1030 * Find a worker which is executing @work on @pool by searching
1031 * @pool->busy_hash which is keyed by the address of @work. For a worker
1032 * to match, its current execution should match the address of @work and
1033 * its work function. This is to avoid unwanted dependency between
1034 * unrelated work executions through a work item being recycled while still
1037 * This is a bit tricky. A work item may be freed once its execution
1038 * starts and nothing prevents the freed area from being recycled for
1039 * another work item. If the same work item address ends up being reused
1040 * before the original execution finishes, workqueue will identify the
1041 * recycled work item as currently executing and make it wait until the
1042 * current execution finishes, introducing an unwanted dependency.
1044 * This function checks the work item address and work function to avoid
1045 * false positives. Note that this isn't complete as one may construct a
1046 * work function which can introduce dependency onto itself through a
1047 * recycled work item. Well, if somebody wants to shoot oneself in the
1048 * foot that badly, there's only so much we can do, and if such deadlock
1049 * actually occurs, it should be easy to locate the culprit work function.
1052 * raw_spin_lock_irq(pool->lock).
1055 * Pointer to worker which is executing @work if found, %NULL
1056 * otherwise.
1057 */
1058 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1059 struct work_struct *work)
1061 struct worker *worker;
1063 hash_for_each_possible(pool->busy_hash, worker, hentry,
1064 (unsigned long)work)
1065 if (worker->current_work == work &&
1066 worker->current_func == work->func)
1073 * move_linked_works - move linked works to a list
1074 * @work: start of series of works to be scheduled
1075 * @head: target list to append @work to
1076 * @nextp: out parameter for nested worklist walking
1078 * Schedule linked works starting from @work to @head. Work series to
1079 * be scheduled starts at @work and includes any consecutive work with
1080 * WORK_STRUCT_LINKED set in its predecessor.
1082 * If @nextp is not NULL, it's updated to point to the next work of
1083 * the last scheduled work. This allows move_linked_works() to be
1084 * nested inside outer list_for_each_entry_safe().
1087 * raw_spin_lock_irq(pool->lock).
1089 static void move_linked_works(struct work_struct *work, struct list_head *head,
1090 struct work_struct **nextp)
1092 struct work_struct *n;
1095 * Linked worklist will always end before the end of the list,
1096 * use NULL for list head.
1098 list_for_each_entry_safe_from(work, n, NULL, entry) {
1099 list_move_tail(&work->entry, head);
1100 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1105 * If we're already inside safe list traversal and have moved
1106 * multiple works to the scheduled queue, the next position
1107 * needs to be updated.
1114 * get_pwq - get an extra reference on the specified pool_workqueue
1115 * @pwq: pool_workqueue to get
1117 * Obtain an extra reference on @pwq. The caller should guarantee that
1118 * @pwq has positive refcnt and be holding the matching pool->lock.
1120 static void get_pwq(struct pool_workqueue *pwq)
1122 lockdep_assert_held(&pwq->pool->lock);
1123 WARN_ON_ONCE(pwq->refcnt <= 0);
1128 * put_pwq - put a pool_workqueue reference
1129 * @pwq: pool_workqueue to put
1131 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1132 * destruction. The caller should be holding the matching pool->lock.
1134 static void put_pwq(struct pool_workqueue *pwq)
1136 lockdep_assert_held(&pwq->pool->lock);
1137 if (likely(--pwq->refcnt))
1139 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1142 * @pwq can't be released under pool->lock, bounce to
1143 * pwq_unbound_release_workfn(). This never recurses on the same
1144 * pool->lock as this path is taken only for unbound workqueues and
1145 * the release work item is scheduled on a per-cpu workqueue. To
1146 * avoid lockdep warning, unbound pool->locks are given lockdep
1147 * subclass of 1 in get_unbound_pool().
1149 schedule_work(&pwq->unbound_release_work);
1153 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1154 * @pwq: pool_workqueue to put (can be %NULL)
1156 * put_pwq() with locking. This function also allows %NULL @pwq.
1158 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1162 * As both pwqs and pools are RCU protected, the
1163 * following lock operations are safe.
1165 raw_spin_lock_irq(&pwq->pool->lock);
1167 raw_spin_unlock_irq(&pwq->pool->lock);
1171 static void pwq_activate_inactive_work(struct work_struct *work)
1173 struct pool_workqueue *pwq = get_work_pwq(work);
1175 trace_workqueue_activate_work(work);
1176 if (list_empty(&pwq->pool->worklist))
1177 pwq->pool->watchdog_ts = jiffies;
1178 move_linked_works(work, &pwq->pool->worklist, NULL);
1179 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
1183 static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
1185 struct work_struct *work = list_first_entry(&pwq->inactive_works,
1186 struct work_struct, entry);
1188 pwq_activate_inactive_work(work);
1192 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1193 * @pwq: pwq of interest
1194 * @work_data: work_data of work which left the queue
1196 * A work item has either completed or been removed from its pending queue;
1197 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1200 * raw_spin_lock_irq(pool->lock).
1202 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1204 int color = get_work_color(work_data);
1206 if (!(work_data & WORK_STRUCT_INACTIVE)) {
1208 if (!list_empty(&pwq->inactive_works)) {
1209 /* one down, submit an inactive one */
1210 if (pwq->nr_active < pwq->max_active)
1211 pwq_activate_first_inactive(pwq);
1215 pwq->nr_in_flight[color]--;
1217 /* is flush in progress and are we at the flushing tip? */
1218 if (likely(pwq->flush_color != color))
1221 /* are there still in-flight works? */
1222 if (pwq->nr_in_flight[color])
1225 /* this pwq is done, clear flush_color */
1226 pwq->flush_color = -1;
1229 * If this was the last pwq, wake up the first flusher. It
1230 * will handle the rest.
1232 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1233 complete(&pwq->wq->first_flusher->done);
1239 * try_to_grab_pending - steal work item from worklist and disable irq
1240 * @work: work item to steal
1241 * @is_dwork: @work is a delayed_work
1242 * @flags: place to store irq state
1244 * Try to grab PENDING bit of @work. This function can handle @work in any
1245 * stable state - idle, on timer or on worklist.
1249 * ======== ================================================================
1250 * 1 if @work was pending and we successfully stole PENDING
1251 * 0 if @work was idle and we claimed PENDING
1252 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1253 * -ENOENT if someone else is canceling @work, this state may persist
1254 * for arbitrarily long
1255 * ======== ================================================================
1258 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1259 * interrupted while holding PENDING and @work off queue, irq must be
1260 * disabled on entry. This, combined with delayed_work->timer being
1261 * irqsafe, ensures that we return -EAGAIN only for a finite, short period of time.
1263 * On successful return, >= 0, irq is disabled and the caller is
1264 * responsible for releasing it using local_irq_restore(*@flags).
1266 * This function is safe to call from any context including IRQ handler.
1268 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1269 unsigned long *flags)
1271 struct worker_pool *pool;
1272 struct pool_workqueue *pwq;
1274 local_irq_save(*flags);
1276 /* try to steal the timer if it exists */
1278 struct delayed_work *dwork = to_delayed_work(work);
1281 * dwork->timer is irqsafe. If del_timer() fails, it's
1282 * guaranteed that the timer is not queued anywhere and not
1283 * running on the local CPU.
1285 if (likely(del_timer(&dwork->timer)))
1289 /* try to claim PENDING the normal way */
1290 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1295 * The queueing is in progress, or it is already queued. Try to
1296 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1298 pool = get_work_pool(work);
1302 raw_spin_lock(&pool->lock);
1304 * work->data is guaranteed to point to pwq only while the work
1305 * item is queued on pwq->wq, and both updating work->data to point
1306 * to pwq on queueing and to pool on dequeueing are done under
1307 * pwq->pool->lock. This in turn guarantees that, if work->data
1308 * points to pwq which is associated with a locked pool, the work
1309 * item is currently queued on that pool.
1311 pwq = get_work_pwq(work);
1312 if (pwq && pwq->pool == pool) {
1313 debug_work_deactivate(work);
1316 * A cancelable inactive work item must be in the
1317 * pwq->inactive_works since a queued barrier can't be
1318 * canceled (see the comments in insert_wq_barrier()).
1320 * An inactive work item cannot be grabbed directly because
1321 * it might have linked barrier work items which, if left
1322 * on the inactive_works list, will confuse pwq->nr_active
1323 * management later on and cause stall. Make sure the work
1324 * item is activated before grabbing.
1326 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
1327 pwq_activate_inactive_work(work);
1329 list_del_init(&work->entry);
1330 pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
1332 /* work->data points to pwq iff queued, point to pool */
1333 set_work_pool_and_keep_pending(work, pool->id);
1335 raw_spin_unlock(&pool->lock);
1339 raw_spin_unlock(&pool->lock);
1342 local_irq_restore(*flags);
1343 if (work_is_canceling(work))
1350 * insert_work - insert a work into a pool
1351 * @pwq: pwq @work belongs to
1352 * @work: work to insert
1353 * @head: insertion point
1354 * @extra_flags: extra WORK_STRUCT_* flags to set
1356 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1357 * work_struct flags.
1360 * raw_spin_lock_irq(pool->lock).
1362 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1363 struct list_head *head, unsigned int extra_flags)
1365 struct worker_pool *pool = pwq->pool;
1367 /* record the work call stack in order to print it in KASAN reports */
1368 kasan_record_aux_stack_noalloc(work);
1370 /* we own @work, set data and link */
1371 set_work_pwq(work, pwq, extra_flags);
1372 list_add_tail(&work->entry, head);
1375 if (__need_more_worker(pool))
1376 wake_up_worker(pool);
1380 * Test whether @work is being queued from another work executing on the
1381 * same workqueue.
1382 */
1383 static bool is_chained_work(struct workqueue_struct *wq)
1385 struct worker *worker;
1387 worker = current_wq_worker();
1389 * Return %true iff I'm a worker executing a work item on @wq. If
1390 * I'm @worker, it's safe to dereference it without locking.
1392 return worker && worker->current_pwq->wq == wq;
1396 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1397 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
1398 * avoid perturbing sensitive tasks.
1400 static int wq_select_unbound_cpu(int cpu)
1404 if (likely(!wq_debug_force_rr_cpu)) {
1405 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1408 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
1411 if (cpumask_empty(wq_unbound_cpumask))
1414 new_cpu = __this_cpu_read(wq_rr_cpu_last);
1415 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1416 if (unlikely(new_cpu >= nr_cpu_ids)) {
1417 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1418 if (unlikely(new_cpu >= nr_cpu_ids))
1421 __this_cpu_write(wq_rr_cpu_last, new_cpu);
1426 static void __queue_work(int cpu, struct workqueue_struct *wq,
1427 struct work_struct *work)
1429 struct pool_workqueue *pwq;
1430 struct worker_pool *last_pool;
1431 struct list_head *worklist;
1432 unsigned int work_flags;
1433 unsigned int req_cpu = cpu;
1436 * While a work item is PENDING && off queue, a task trying to
1437 * steal the PENDING will busy-loop waiting for it to either get
1438 * queued or lose PENDING. Grabbing PENDING and queueing should
1439 * happen with IRQ disabled.
1441 lockdep_assert_irqs_disabled();
1445 * For a draining wq, only works from the same workqueue are
1446 * allowed. The __WQ_DESTROYING helps to spot the issue that
1447 * queues a new work item to a wq after destroy_workqueue(wq).
1449 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
1450 WARN_ON_ONCE(!is_chained_work(wq))))
1454 /* pwq which will be used unless @work is executing elsewhere */
1455 if (wq->flags & WQ_UNBOUND) {
1456 if (req_cpu == WORK_CPU_UNBOUND)
1457 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1458 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1460 if (req_cpu == WORK_CPU_UNBOUND)
1461 cpu = raw_smp_processor_id();
1462 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1466 * If @work was previously on a different pool, it might still be
1467 * running there, in which case the work needs to be queued on that
1468 * pool to guarantee non-reentrancy.
1470 last_pool = get_work_pool(work);
1471 if (last_pool && last_pool != pwq->pool) {
1472 struct worker *worker;
1474 raw_spin_lock(&last_pool->lock);
1476 worker = find_worker_executing_work(last_pool, work);
1478 if (worker && worker->current_pwq->wq == wq) {
1479 pwq = worker->current_pwq;
1481 /* meh... not running there, queue here */
1482 raw_spin_unlock(&last_pool->lock);
1483 raw_spin_lock(&pwq->pool->lock);
1486 raw_spin_lock(&pwq->pool->lock);
1490 * pwq is determined and locked. For unbound pools, we could have
1491 * raced with pwq release and it could already be dead. If its
1492 * refcnt is zero, repeat pwq selection. Note that pwqs never die
1493 * without another pwq replacing it in the numa_pwq_tbl or while
1494 * work items are executing on it, so the retrying is guaranteed to
1495 * make forward-progress.
1497 if (unlikely(!pwq->refcnt)) {
1498 if (wq->flags & WQ_UNBOUND) {
1499 raw_spin_unlock(&pwq->pool->lock);
1504 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1508 /* pwq determined, queue */
1509 trace_workqueue_queue_work(req_cpu, pwq, work);
1511 if (WARN_ON(!list_empty(&work->entry)))
1514 pwq->nr_in_flight[pwq->work_color]++;
1515 work_flags = work_color_to_flags(pwq->work_color);
1517 if (likely(pwq->nr_active < pwq->max_active)) {
1518 trace_workqueue_activate_work(work);
1520 worklist = &pwq->pool->worklist;
1521 if (list_empty(worklist))
1522 pwq->pool->watchdog_ts = jiffies;
1524 work_flags |= WORK_STRUCT_INACTIVE;
1525 worklist = &pwq->inactive_works;
1528 debug_work_activate(work);
1529 insert_work(pwq, work, worklist, work_flags);
1532 raw_spin_unlock(&pwq->pool->lock);
1537 * queue_work_on - queue work on specific cpu
1538 * @cpu: CPU number to execute work on
1539 * @wq: workqueue to use
1540 * @work: work to queue
1542 * We queue the work to a specific CPU, the caller must ensure it
1543 * can't go away. Callers that fail to ensure that the specified
1544 * CPU cannot go away will execute on a randomly chosen CPU.
1546 * Return: %false if @work was already on a queue, %true otherwise.
1548 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1549 struct work_struct *work)
1552 unsigned long flags;
1554 local_irq_save(flags);
1556 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1557 __queue_work(cpu, wq, work);
1561 local_irq_restore(flags);
1564 EXPORT_SYMBOL(queue_work_on);
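/*
 * Illustrative sketch (hypothetical caller): pin work to CPU 3 while
 * holding off CPU hotplug so the target CPU cannot go away underneath us.
 *
 *	cpus_read_lock();
 *	if (cpu_online(3))
 *		queue_work_on(3, system_wq, &example_work);
 *	cpus_read_unlock();
 */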
1567 * workqueue_select_cpu_near - Select a CPU based on NUMA node
1568 * @node: NUMA node ID that we want to select a CPU from
1570 * This function will attempt to find a "random" cpu available on a given
1571 * node. If there are no CPUs available on the given node it will return
1572 * WORK_CPU_UNBOUND indicating that we should just schedule to any
1573 * available CPU if we need to schedule this work.
1575 static int workqueue_select_cpu_near(int node)
1579 /* No point in doing this if NUMA isn't enabled for workqueues */
1580 if (!wq_numa_enabled)
1581 return WORK_CPU_UNBOUND;
1583 /* Delay binding to CPU if node is not valid or not online */
1584 if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1585 return WORK_CPU_UNBOUND;
1587 /* Use local node/cpu if we are already there */
1588 cpu = raw_smp_processor_id();
1589 if (node == cpu_to_node(cpu))
1592 /* Use "random" otherwise known as "first" online CPU of node */
1593 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1595 /* If CPU is valid return that, otherwise just defer */
1596 return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1600 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1601 * @node: NUMA node that we are targeting the work for
1602 * @wq: workqueue to use
1603 * @work: work to queue
1605 * We queue the work to a "random" CPU within a given NUMA node. The basic
1606 * idea here is to provide a way to somehow associate work with a given
1607 * NUMA node.
1609 * This function will only make a best effort attempt at getting this onto
1610 * the right NUMA node. If no node is requested or the requested node is
1611 * offline then we just fall back to standard queue_work behavior.
1613 * Currently the "random" CPU ends up being the first available CPU in the
1614 * intersection of cpu_online_mask and the cpumask of the node, unless we
1615 * are running on the node. In that case we just use the current CPU.
1617 * Return: %false if @work was already on a queue, %true otherwise.
1619 bool queue_work_node(int node, struct workqueue_struct *wq,
1620 struct work_struct *work)
1622 unsigned long flags;
1626 * This current implementation is specific to unbound workqueues.
1627 * Specifically we only return the first available CPU for a given
1628 * node instead of cycling through individual CPUs within the node.
1630 * If this is used with a per-cpu workqueue then the logic in
1631 * workqueue_select_cpu_near would need to be updated to allow for
1632 * some round robin type logic.
1634 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1636 local_irq_save(flags);
1638 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1639 int cpu = workqueue_select_cpu_near(node);
1641 __queue_work(cpu, wq, work);
1645 local_irq_restore(flags);
1648 EXPORT_SYMBOL_GPL(queue_work_node);
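/*
 * Illustrative sketch (hypothetical caller, @dev assumed to be a valid
 * struct device): queue the work close to the memory it will touch.  The
 * workqueue must be WQ_UNBOUND.
 *
 *	queue_work_node(dev_to_node(dev), system_unbound_wq, &example_work);
 */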
1650 void delayed_work_timer_fn(struct timer_list *t)
1652 struct delayed_work *dwork = from_timer(dwork, t, timer);
1654 /* should have been called from irqsafe timer with irq already off */
1655 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1657 EXPORT_SYMBOL(delayed_work_timer_fn);
1659 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1660 struct delayed_work *dwork, unsigned long delay)
1662 struct timer_list *timer = &dwork->timer;
1663 struct work_struct *work = &dwork->work;
1666 WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1667 WARN_ON_ONCE(timer_pending(timer));
1668 WARN_ON_ONCE(!list_empty(&work->entry));
1671 * If @delay is 0, queue @dwork->work immediately. This is for
1672 * both optimization and correctness. The earliest @timer can
1673 * expire is on the closest next tick and delayed_work users depend
1674 * on there being no such delay when @delay is 0.
1677 __queue_work(cpu, wq, &dwork->work);
1683 timer->expires = jiffies + delay;
1685 if (unlikely(cpu != WORK_CPU_UNBOUND))
1686 add_timer_on(timer, cpu);
1692 * queue_delayed_work_on - queue work on specific CPU after delay
1693 * @cpu: CPU number to execute work on
1694 * @wq: workqueue to use
1695 * @dwork: work to queue
1696 * @delay: number of jiffies to wait before queueing
1698 * Return: %false if @work was already on a queue, %true otherwise. If
1699 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1700 * execution.
1701 */
1702 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1703 struct delayed_work *dwork, unsigned long delay)
1705 struct work_struct *work = &dwork->work;
1707 unsigned long flags;
1709 /* read the comment in __queue_work() */
1710 local_irq_save(flags);
1712 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1713 __queue_delayed_work(cpu, wq, dwork, delay);
1717 local_irq_restore(flags);
1720 EXPORT_SYMBOL(queue_delayed_work_on);
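/*
 * Illustrative sketch (hypothetical caller, poll_fn assumed): poll every
 * 500ms without caring which CPU runs the callback.
 *
 *	static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);
 *
 *	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &poll_dwork,
 *			      msecs_to_jiffies(500));
 */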
1723 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1724 * @cpu: CPU number to execute work on
1725 * @wq: workqueue to use
1726 * @dwork: work to queue
1727 * @delay: number of jiffies to wait before queueing
1729 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1730 * modify @dwork's timer so that it expires after @delay. If @delay is
1731 * zero, @work is guaranteed to be scheduled immediately regardless of its
1732 * current state.
1734 * Return: %false if @dwork was idle and queued, %true if @dwork was
1735 * pending and its timer was modified.
1737 * This function is safe to call from any context including IRQ handler.
1738 * See try_to_grab_pending() for details.
1740 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1741 struct delayed_work *dwork, unsigned long delay)
1743 unsigned long flags;
1747 ret = try_to_grab_pending(&dwork->work, true, &flags);
1748 } while (unlikely(ret == -EAGAIN));
1750 if (likely(ret >= 0)) {
1751 __queue_delayed_work(cpu, wq, dwork, delay);
1752 local_irq_restore(flags);
1755 /* -ENOENT from try_to_grab_pending() becomes %true */
1758 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
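/*
 * Illustrative sketch (hypothetical caller, debounce_dwork assumed):
 * debouncing - each call re-arms the timer, so the handler runs 100ms
 * after the *last* event.
 *
 *	mod_delayed_work(system_wq, &debounce_dwork, msecs_to_jiffies(100));
 */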
1760 static void rcu_work_rcufn(struct rcu_head *rcu)
1762 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
1764 /* read the comment in __queue_work() */
1765 local_irq_disable();
1766 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1771 * queue_rcu_work - queue work after a RCU grace period
1772 * @wq: workqueue to use
1773 * @rwork: work to queue
1775 * Return: %false if @rwork was already pending, %true otherwise. Note
1776 * that a full RCU grace period is guaranteed only after a %true return.
1777 * While @rwork is guaranteed to be executed after a %false return, the
1778 * execution may happen before a full RCU grace period has passed.
1780 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1782 struct work_struct *work = &rwork->work;
1784 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1786 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
1792 EXPORT_SYMBOL(queue_rcu_work);
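/*
 * Illustrative sketch (hypothetical caller, obj and free_obj_workfn
 * assumed): free an object from process context only after pre-existing
 * RCU readers are done with it.
 *
 *	INIT_RCU_WORK(&obj->free_rwork, free_obj_workfn);
 *	queue_rcu_work(system_wq, &obj->free_rwork);
 */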
1795 * worker_enter_idle - enter idle state
1796 * @worker: worker which is entering idle state
1798 * @worker is entering idle state. Update stats and idle timer if
1799 * necessary.
1802 * raw_spin_lock_irq(pool->lock).
1804 static void worker_enter_idle(struct worker *worker)
1806 struct worker_pool *pool = worker->pool;
1808 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1809 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1810 (worker->hentry.next || worker->hentry.pprev)))
1813 /* can't use worker_set_flags(), also called from create_worker() */
1814 worker->flags |= WORKER_IDLE;
1816 worker->last_active = jiffies;
1818 /* idle_list is LIFO */
1819 list_add(&worker->entry, &pool->idle_list);
1821 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1822 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1824 /* Sanity check nr_running. */
1825 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
1829 * worker_leave_idle - leave idle state
1830 * @worker: worker which is leaving idle state
1832 * @worker is leaving idle state. Update stats.
1835 * raw_spin_lock_irq(pool->lock).
1837 static void worker_leave_idle(struct worker *worker)
1839 struct worker_pool *pool = worker->pool;
1841 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1843 worker_clr_flags(worker, WORKER_IDLE);
1845 list_del_init(&worker->entry);
1848 static struct worker *alloc_worker(int node)
1850 struct worker *worker;
1852 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1854 INIT_LIST_HEAD(&worker->entry);
1855 INIT_LIST_HEAD(&worker->scheduled);
1856 INIT_LIST_HEAD(&worker->node);
1857 /* on creation a worker is in !idle && prep state */
1858 worker->flags = WORKER_PREP;
1864 * worker_attach_to_pool() - attach a worker to a pool
1865 * @worker: worker to be attached
1866 * @pool: the target pool
1868 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1869 * cpu-binding of @worker are kept coordinated with the pool across
1870 * cpu-[un]hotplugs.
1871 */
1872 static void worker_attach_to_pool(struct worker *worker,
1873 struct worker_pool *pool)
1875 mutex_lock(&wq_pool_attach_mutex);
1878 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
1879 * stable across this function. See the comments above the flag
1880 * definition for details.
1882 if (pool->flags & POOL_DISASSOCIATED)
1883 worker->flags |= WORKER_UNBOUND;
1885 kthread_set_per_cpu(worker->task, pool->cpu);
1887 if (worker->rescue_wq)
1888 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1890 list_add_tail(&worker->node, &pool->workers);
1891 worker->pool = pool;
1893 mutex_unlock(&wq_pool_attach_mutex);
1897 * worker_detach_from_pool() - detach a worker from its pool
1898 * @worker: worker which is attached to its pool
1900 * Undo the attaching which had been done in worker_attach_to_pool(). The
1901 * caller worker shouldn't access the pool after detaching unless it has
1902 * another reference to the pool.
1904 static void worker_detach_from_pool(struct worker *worker)
1906 struct worker_pool *pool = worker->pool;
1907 struct completion *detach_completion = NULL;
1909 mutex_lock(&wq_pool_attach_mutex);
1911 kthread_set_per_cpu(worker->task, -1);
1912 list_del(&worker->node);
1913 worker->pool = NULL;
1915 if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
1916 detach_completion = pool->detach_completion;
1917 mutex_unlock(&wq_pool_attach_mutex);
1919 /* clear leftover flags without pool->lock after it is detached */
1920 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1922 if (detach_completion)
1923 complete(detach_completion);
1927 * create_worker - create a new workqueue worker
1928 * @pool: pool the new worker will belong to
1930 * Create and start a new worker which is attached to @pool.
1933 * Might sleep. Does GFP_KERNEL allocations.
1936 * Pointer to the newly created worker.
1938 static struct worker *create_worker(struct worker_pool *pool)
1940 struct worker *worker;
1944 /* ID is needed to determine kthread name */
1945 id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
1947 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
1952 worker = alloc_worker(pool->node);
1954 pr_err_once("workqueue: Failed to allocate a worker\n");
1961 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1962 pool->attrs->nice < 0 ? "H" : "");
1964 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1966 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1967 "kworker/%s", id_buf);
1968 if (IS_ERR(worker->task)) {
1969 if (PTR_ERR(worker->task) == -EINTR) {
1970 pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
1973 pr_err_once("workqueue: Failed to create a worker thread: %pe",
1979 set_user_nice(worker->task, pool->attrs->nice);
1980 kthread_bind_mask(worker->task, pool->attrs->cpumask);
1982 /* successful, attach the worker to the pool */
1983 worker_attach_to_pool(worker, pool);
1985 /* start the newly created worker */
1986 raw_spin_lock_irq(&pool->lock);
1987 worker->pool->nr_workers++;
1988 worker_enter_idle(worker);
1989 wake_up_process(worker->task);
1990 raw_spin_unlock_irq(&pool->lock);
1995 ida_free(&pool->worker_ida, id);
2000 static void unbind_worker(struct worker *worker)
2002 lockdep_assert_held(&wq_pool_attach_mutex);
2004 kthread_set_per_cpu(worker->task, -1);
2005 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
2006 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
2008 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
2011 static void wake_dying_workers(struct list_head *cull_list)
2013 struct worker *worker, *tmp;
2015 list_for_each_entry_safe(worker, tmp, cull_list, entry) {
2016 list_del_init(&worker->entry);
2017 unbind_worker(worker);
2019 * If the worker was somehow already running, then it had to be
2020 * in pool->idle_list when set_worker_dying() happened or we
2021 * wouldn't have gotten here.
2023 * Thus, the worker must either have observed the WORKER_DIE
2024 * flag, or have set its state to TASK_IDLE. Either way, the
2025 * below will be observed by the worker and is safe to do
2026 * outside of pool->lock.
2028 wake_up_process(worker->task);
2033 * set_worker_dying - Tag a worker for destruction
2034 * @worker: worker to be destroyed
2035 * @list: transfer worker away from its pool->idle_list and into list
2037 * Tag @worker for destruction and adjust @pool stats accordingly. The worker
2041 * raw_spin_lock_irq(pool->lock).
2043 static void set_worker_dying(struct worker *worker, struct list_head *list)
2045 struct worker_pool *pool = worker->pool;
2047 lockdep_assert_held(&pool->lock);
2048 lockdep_assert_held(&wq_pool_attach_mutex);
2050 /* sanity check frenzy */
2051 if (WARN_ON(worker->current_work) ||
2052 WARN_ON(!list_empty(&worker->scheduled)) ||
2053 WARN_ON(!(worker->flags & WORKER_IDLE)))
2059 worker->flags |= WORKER_DIE;
2061 list_move(&worker->entry, list);
2062 list_move(&worker->node, &pool->dying_workers);
2066 * idle_worker_timeout - check if some idle workers can now be deleted.
2067 * @t: The pool's idle_timer that just expired
2069 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
2070 * worker_leave_idle(), as a worker flicking between idle and active while its
2071 * pool is at the too_many_workers() tipping point would cause too much timer
2072 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
2073 * it expire and re-evaluate things from there.
2075 static void idle_worker_timeout(struct timer_list *t)
2077 struct worker_pool *pool = from_timer(pool, t, idle_timer);
2078 bool do_cull = false;
2080 if (work_pending(&pool->idle_cull_work))
2083 raw_spin_lock_irq(&pool->lock);
2085 if (too_many_workers(pool)) {
2086 struct worker *worker;
2087 unsigned long expires;
2089 /* idle_list is kept in LIFO order, check the last one */
2090 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2091 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2092 do_cull = !time_before(jiffies, expires);
2095 mod_timer(&pool->idle_timer, expires);
2097 raw_spin_unlock_irq(&pool->lock);
2100 queue_work(system_unbound_wq, &pool->idle_cull_work);
2104 * idle_cull_fn - cull workers that have been idle for too long.
2105 * @work: the pool's work for handling these idle workers
2107 * This goes through a pool's idle workers and gets rid of those that have been
2108 * idle for at least IDLE_WORKER_TIMEOUT (5 minutes).
2110 * We don't want to disturb isolated CPUs because of a pcpu kworker being
2111 * culled, so this also resets worker affinity. This requires a sleepable
2112 * context, hence the split between timer callback and work item.
2114 static void idle_cull_fn(struct work_struct *work)
2116 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
2117 struct list_head cull_list;
2119 INIT_LIST_HEAD(&cull_list);
2121 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
2122 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
2123 * path. This is required as a previously-preempted worker could run after
2124 * set_worker_dying() has happened but before wake_dying_workers() did.
2126 mutex_lock(&wq_pool_attach_mutex);
2127 raw_spin_lock_irq(&pool->lock);
2129 while (too_many_workers(pool)) {
2130 struct worker *worker;
2131 unsigned long expires;
2133 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2134 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2136 if (time_before(jiffies, expires)) {
2137 mod_timer(&pool->idle_timer, expires);
2141 set_worker_dying(worker, &cull_list);
2144 raw_spin_unlock_irq(&pool->lock);
2145 wake_dying_workers(&cull_list);
2146 mutex_unlock(&wq_pool_attach_mutex);
2149 static void send_mayday(struct work_struct *work)
2151 struct pool_workqueue *pwq = get_work_pwq(work);
2152 struct workqueue_struct *wq = pwq->wq;
2154 lockdep_assert_held(&wq_mayday_lock);
2159 /* mayday mayday mayday */
2160 if (list_empty(&pwq->mayday_node)) {
2162 * If @pwq is for an unbound wq, its base ref may be put at
2163 * any time due to an attribute change. Pin @pwq until the
2164 * rescuer is done with it.
2167 list_add_tail(&pwq->mayday_node, &wq->maydays);
2168 wake_up_process(wq->rescuer->task);
2172 static void pool_mayday_timeout(struct timer_list *t)
2174 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2175 struct work_struct *work;
2177 raw_spin_lock_irq(&pool->lock);
2178 raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
2180 if (need_to_create_worker(pool)) {
2182 * We've been trying to create a new worker but
2183 * haven't been successful. We might be hitting an
2184 * allocation deadlock. Send distress signals to
2187 list_for_each_entry(work, &pool->worklist, entry)
2191 raw_spin_unlock(&wq_mayday_lock);
2192 raw_spin_unlock_irq(&pool->lock);
2194 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2198 * maybe_create_worker - create a new worker if necessary
2199 * @pool: pool to create a new worker for
2201 * Create a new worker for @pool if necessary. @pool is guaranteed to
2202 * have at least one idle worker on return from this function. If
2203 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2204 * sent to all rescuers with works scheduled on @pool to resolve
2205 * possible allocation deadlock.
2207 * On return, need_to_create_worker() is guaranteed to be %false and
2208 * may_start_working() %true.
2211 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2212 * multiple times. Does GFP_KERNEL allocations. Called only from
2215 static void maybe_create_worker(struct worker_pool *pool)
2216 __releases(&pool->lock)
2217 __acquires(&pool->lock)
2220 raw_spin_unlock_irq(&pool->lock);
2222 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2223 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2226 if (create_worker(pool) || !need_to_create_worker(pool))
2229 schedule_timeout_interruptible(CREATE_COOLDOWN);
2231 if (!need_to_create_worker(pool))
2235 del_timer_sync(&pool->mayday_timer);
2236 raw_spin_lock_irq(&pool->lock);
2238 * This is necessary even after a new worker was just successfully
2239 * created as @pool->lock was dropped and the new worker might have
2240 * already become busy.
2242 if (need_to_create_worker(pool))
2247 * manage_workers - manage worker pool
2250 * Assume the manager role and manage the worker pool @worker belongs
2251 * to. At any given time, there can be only zero or one manager per
2252 * pool. The exclusion is handled automatically by this function.
2254 * The caller can safely start processing works on false return. On
2255 * true return, it's guaranteed that need_to_create_worker() is false
2256 * and may_start_working() is true.
2259 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2260 * multiple times. Does GFP_KERNEL allocations.
2263 * %false if the pool doesn't need management and the caller can safely
2264 * start processing works, %true if management function was performed and
2265 * the conditions that the caller verified before calling the function may
2266 * no longer be true.
2268 static bool manage_workers(struct worker *worker)
2270 struct worker_pool *pool = worker->pool;
2272 if (pool->flags & POOL_MANAGER_ACTIVE)
2275 pool->flags |= POOL_MANAGER_ACTIVE;
2276 pool->manager = worker;
2278 maybe_create_worker(pool);
2280 pool->manager = NULL;
2281 pool->flags &= ~POOL_MANAGER_ACTIVE;
2282 rcuwait_wake_up(&manager_wait);
2287 * process_one_work - process single work
2289 * @work: work to process
2291 * Process @work. This function contains all the logic necessary to
2292 * process a single work including synchronization against and
2293 * interaction with other workers on the same cpu, queueing and
2294 * flushing. As long as the context requirements are met, any worker can
2295 * call this function to process a work.
2298 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2300 static void process_one_work(struct worker *worker, struct work_struct *work)
2301 __releases(&pool->lock)
2302 __acquires(&pool->lock)
2304 struct pool_workqueue *pwq = get_work_pwq(work);
2305 struct worker_pool *pool = worker->pool;
2306 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2307 unsigned long work_data;
2308 struct worker *collision;
2309 #ifdef CONFIG_LOCKDEP
2311 * It is permissible to free the struct work_struct from
2312 * inside the function that is called from it, this we need to
2313 * take into account for lockdep too. To avoid bogus "held
2314 * lock freed" warnings as well as problems when looking into
2315 * work->lockdep_map, make a copy and use that here.
2317 struct lockdep_map lockdep_map;
2319 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2321 /* ensure we're on the correct CPU */
2322 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2323 raw_smp_processor_id() != pool->cpu);
2326 * A single work shouldn't be executed concurrently by
2327 * multiple workers on a single cpu. Check whether anyone is
2328 * already processing the work. If so, defer the work to the
2329 * currently executing one.
2331 collision = find_worker_executing_work(pool, work);
2332 if (unlikely(collision)) {
2333 move_linked_works(work, &collision->scheduled, NULL);
2337 /* claim and dequeue */
2338 debug_work_deactivate(work);
2339 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2340 worker->current_work = work;
2341 worker->current_func = work->func;
2342 worker->current_pwq = pwq;
2343 work_data = *work_data_bits(work);
2344 worker->current_color = get_work_color(work_data);
2347 * Record wq name for cmdline and debug reporting, may get
2348 * overridden through set_worker_desc().
2350 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2352 list_del_init(&work->entry);
2355 * CPU intensive works don't participate in concurrency management.
2356 * They're the scheduler's responsibility. This takes @worker out
2357 * of concurrency management and the next code block will chain
2358 * execution of the pending work items.
2360 if (unlikely(cpu_intensive))
2361 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2364 * Wake up another worker if necessary. The condition is always
2365 * false for normal per-cpu workers since nr_running would always
2366 * be >= 1 at this point. This is used to chain execution of the
2367 * pending work items for WORKER_NOT_RUNNING workers such as the
2368 * UNBOUND and CPU_INTENSIVE ones.
2370 if (need_more_worker(pool))
2371 wake_up_worker(pool);
2374 * Record the last pool and clear PENDING which should be the last
2375 * update to @work. Also, do this inside @pool->lock so that
2376 * PENDING and queued state changes happen together while IRQ is
2379 set_work_pool_and_clear_pending(work, pool->id);
2381 raw_spin_unlock_irq(&pool->lock);
2383 lock_map_acquire(&pwq->wq->lockdep_map);
2384 lock_map_acquire(&lockdep_map);
2386 * Strictly speaking we should mark the invariant state without holding
2387 * any locks, that is, before these two lock_map_acquire()'s.
2389 * However, that would result in:
2396 * Which would create W1->C->W1 dependencies, even though there is no
2397 * actual deadlock possible. There are two solutions: use a
2398 * read-recursive acquire on the work(queue) 'locks', which then hits
2399 * the lockdep limitation on recursive locks, or simply discard these locks.
2402 * AFAICT there is no possible deadlock scenario between the
2403 * flush_work() and complete() primitives (except for single-threaded
2404 * workqueues), so hiding them isn't a problem.
2406 lockdep_invariant_state(true);
2407 trace_workqueue_execute_start(work);
2408 worker->current_func(work);
2410 * While we must be careful to not use "work" after this, the trace
2411 * point will only record its address.
2413 trace_workqueue_execute_end(work, worker->current_func);
2414 lock_map_release(&lockdep_map);
2415 lock_map_release(&pwq->wq->lockdep_map);
2417 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2418 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2419 " last function: %ps\n",
2420 current->comm, preempt_count(), task_pid_nr(current),
2421 worker->current_func);
2422 debug_show_held_locks(current);
2427 * The following prevents a kworker from hogging CPU on !PREEMPTION
2428 * kernels, where a requeueing work item waiting for something to
2429 * happen could deadlock with stop_machine as such work item could
2430 * indefinitely requeue itself while all other CPUs are trapped in
2431 * stop_machine. At the same time, report a quiescent RCU state so
2432 * the same condition doesn't freeze RCU.
2436 raw_spin_lock_irq(&pool->lock);
2438 /* clear cpu intensive status */
2439 if (unlikely(cpu_intensive))
2440 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2442 /* tag the worker for identification in schedule() */
2443 worker->last_func = worker->current_func;
2445 /* we're done with it, release */
2446 hash_del(&worker->hentry);
2447 worker->current_work = NULL;
2448 worker->current_func = NULL;
2449 worker->current_pwq = NULL;
2450 worker->current_color = INT_MAX;
2451 pwq_dec_nr_in_flight(pwq, work_data);
2455 * process_scheduled_works - process scheduled works
2458 * Process all scheduled works. Please note that the scheduled list
2459 * may change while processing a work, so this function repeatedly
2460 * fetches a work from the top and executes it.
2463 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2466 static void process_scheduled_works(struct worker *worker)
2468 while (!list_empty(&worker->scheduled)) {
2469 struct work_struct *work = list_first_entry(&worker->scheduled,
2470 struct work_struct, entry);
2471 process_one_work(worker, work);
2475 static void set_pf_worker(bool val)
2477 mutex_lock(&wq_pool_attach_mutex);
2479 current->flags |= PF_WQ_WORKER;
2481 current->flags &= ~PF_WQ_WORKER;
2482 mutex_unlock(&wq_pool_attach_mutex);
2486 * worker_thread - the worker thread function
2489 * The worker thread function. All workers belong to a worker_pool -
2490 * either a per-cpu one or dynamic unbound one. These workers process all
2491 * work items regardless of their specific target workqueue. The only
2492 * exception is work items which belong to workqueues with a rescuer which
2493 * will be explained in rescuer_thread().
2497 static int worker_thread(void *__worker)
2499 struct worker *worker = __worker;
2500 struct worker_pool *pool = worker->pool;
2502 /* tell the scheduler that this is a workqueue worker */
2503 set_pf_worker(true);
2505 raw_spin_lock_irq(&pool->lock);
2507 /* am I supposed to die? */
2508 if (unlikely(worker->flags & WORKER_DIE)) {
2509 raw_spin_unlock_irq(&pool->lock);
2510 set_pf_worker(false);
2512 set_task_comm(worker->task, "kworker/dying");
2513 ida_free(&pool->worker_ida, worker->id);
2514 worker_detach_from_pool(worker);
2515 WARN_ON_ONCE(!list_empty(&worker->entry));
2520 worker_leave_idle(worker);
2522 /* no more worker necessary? */
2523 if (!need_more_worker(pool))
2526 /* do we need to manage? */
2527 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2531 * ->scheduled list can only be filled while a worker is
2532 * preparing to process a work or actually processing it.
2533 * Make sure nobody diddled with it while I was sleeping.
2535 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2538 * Finish PREP stage. We're guaranteed to have at least one idle
2539 * worker or that someone else has already assumed the manager
2540 * role. This is where @worker starts participating in concurrency
2541 * management if applicable and concurrency management is restored
2542 * after being rebound. See rebind_workers() for details.
2544 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2547 struct work_struct *work =
2548 list_first_entry(&pool->worklist,
2549 struct work_struct, entry);
2551 pool->watchdog_ts = jiffies;
2553 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2554 /* optimization path, not strictly necessary */
2555 process_one_work(worker, work);
2556 if (unlikely(!list_empty(&worker->scheduled)))
2557 process_scheduled_works(worker);
2559 move_linked_works(work, &worker->scheduled, NULL);
2560 process_scheduled_works(worker);
2562 } while (keep_working(pool));
2564 worker_set_flags(worker, WORKER_PREP);
2567 * pool->lock is held and there's no work to process and no need to
2568 * manage, sleep. Workers are woken up only while holding
2569 * pool->lock or from local cpu, so setting the current state
2570 * before releasing pool->lock is enough to prevent losing any
2573 worker_enter_idle(worker);
2574 __set_current_state(TASK_IDLE);
2575 raw_spin_unlock_irq(&pool->lock);
2581 * rescuer_thread - the rescuer thread function
2584 * Workqueue rescuer thread function. There's one rescuer for each
2585 * workqueue which has WQ_MEM_RECLAIM set.
2587 * Regular work processing on a pool may block trying to create a new
2588 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2589 * developing into a deadlock if some works currently on the same queue
2590 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2591 * the problem the rescuer solves.
2593 * When such a condition is possible, the pool summons the rescuers of all
2594 * workqueues which have works queued on the pool and lets them process
2595 * those works so that forward progress can be guaranteed.
2597 * This should happen rarely.
2601 static int rescuer_thread(void *__rescuer)
2603 struct worker *rescuer = __rescuer;
2604 struct workqueue_struct *wq = rescuer->rescue_wq;
2605 struct list_head *scheduled = &rescuer->scheduled;
2608 set_user_nice(current, RESCUER_NICE_LEVEL);
2611 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2612 * doesn't participate in concurrency management.
2614 set_pf_worker(true);
2616 set_current_state(TASK_IDLE);
2619 * By the time the rescuer is requested to stop, the workqueue
2620 * shouldn't have any work pending, but @wq->maydays may still have
2621 * pwq(s) queued. This can happen by non-rescuer workers consuming
2622 * all the work items before the rescuer got to them. Go through
2623 * @wq->maydays processing before acting on should_stop so that the
2624 * list is always empty on exit.
2626 should_stop = kthread_should_stop();
2628 /* see whether any pwq is asking for help */
2629 raw_spin_lock_irq(&wq_mayday_lock);
2631 while (!list_empty(&wq->maydays)) {
2632 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2633 struct pool_workqueue, mayday_node);
2634 struct worker_pool *pool = pwq->pool;
2635 struct work_struct *work, *n;
2638 __set_current_state(TASK_RUNNING);
2639 list_del_init(&pwq->mayday_node);
2641 raw_spin_unlock_irq(&wq_mayday_lock);
2643 worker_attach_to_pool(rescuer, pool);
2645 raw_spin_lock_irq(&pool->lock);
2648 * Slurp in all works issued via this workqueue and
2651 WARN_ON_ONCE(!list_empty(scheduled));
2652 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2653 if (get_work_pwq(work) == pwq) {
2655 pool->watchdog_ts = jiffies;
2656 move_linked_works(work, scheduled, &n);
2661 if (!list_empty(scheduled)) {
2662 process_scheduled_works(rescuer);
2665 * The above execution of rescued work items could
2666 * have created more to rescue through
2667 * pwq_activate_first_inactive() or chained
2668 * queueing. Let's put @pwq back on mayday list so
2669 * that such back-to-back work items, which may be
2670 * being used to relieve memory pressure, don't
2671 * incur the MAYDAY_INTERVAL delay in between.
2673 if (pwq->nr_active && need_to_create_worker(pool)) {
2674 raw_spin_lock(&wq_mayday_lock);
2676 * Queue iff we aren't racing destruction
2677 * and somebody else hasn't queued it already.
2679 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2681 list_add_tail(&pwq->mayday_node, &wq->maydays);
2683 raw_spin_unlock(&wq_mayday_lock);
2688 * Put the reference grabbed by send_mayday(). @pool won't
2689 * go away while we're still attached to it.
2694 * Leave this pool. If need_more_worker() is %true, notify a
2695 * regular worker; otherwise, we end up with 0 concurrency
2696 * and stalling the execution.
2698 if (need_more_worker(pool))
2699 wake_up_worker(pool);
2701 raw_spin_unlock_irq(&pool->lock);
2703 worker_detach_from_pool(rescuer);
2705 raw_spin_lock_irq(&wq_mayday_lock);
2708 raw_spin_unlock_irq(&wq_mayday_lock);
2711 __set_current_state(TASK_RUNNING);
2712 set_pf_worker(false);
2716 /* rescuers should never participate in concurrency management */
2717 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
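/*
 * Editor's illustrative sketch, not part of the kernel source: a rescuer
 * is created for every workqueue allocated with WQ_MEM_RECLAIM, which is
 * how users opt into the forward-progress guarantee described above.
 * "fs_reclaim_wq" is a hypothetical name.
 *
 *	struct workqueue_struct *fs_reclaim_wq;
 *
 *	fs_reclaim_wq = alloc_workqueue("fs_reclaim_wq", WQ_MEM_RECLAIM, 0);
 *	if (!fs_reclaim_wq)
 *		return -ENOMEM;
 */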
2723 * check_flush_dependency - check for flush dependency sanity
2724 * @target_wq: workqueue being flushed
2725 * @target_work: work item being flushed (NULL for workqueue flushes)
2727 * %current is trying to flush the whole @target_wq or @target_work on it.
2728 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2729 * reclaiming memory or running on a workqueue which doesn't have
2730 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2733 static void check_flush_dependency(struct workqueue_struct *target_wq,
2734 struct work_struct *target_work)
2736 work_func_t target_func = target_work ? target_work->func : NULL;
2737 struct worker *worker;
2739 if (target_wq->flags & WQ_MEM_RECLAIM)
2742 worker = current_wq_worker();
2744 WARN_ONCE(current->flags & PF_MEMALLOC,
2745 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2746 current->pid, current->comm, target_wq->name, target_func);
2747 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2748 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2749 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2750 worker->current_pwq->wq->name, worker->current_func,
2751 target_wq->name, target_func);
2755 struct work_struct work;
2756 struct completion done;
2757 struct task_struct *task; /* purely informational */
2760 static void wq_barrier_func(struct work_struct *work)
2762 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2763 complete(&barr->done);
2767 * insert_wq_barrier - insert a barrier work
2768 * @pwq: pwq to insert barrier into
2769 * @barr: wq_barrier to insert
2770 * @target: target work to attach @barr to
2771 * @worker: worker currently executing @target, NULL if @target is not executing
2773 * @barr is linked to @target such that @barr is completed only after
2774 * @target finishes execution. Please note that the ordering
2775 * guarantee is observed only with respect to @target and on the local
2778 * Currently, a queued barrier can't be canceled. This is because
2779 * try_to_grab_pending() can't determine whether the work to be
2780 * grabbed is at the head of the queue and thus can't clear the LINKED
2781 * flag of the previous work, while there must be a valid next work
2782 * after a work with the LINKED flag set.
2784 * Note that when @worker is non-NULL, @target may be modified
2785 * underneath us, so we can't reliably determine pwq from @target.
2788 * raw_spin_lock_irq(pool->lock).
2790 static void insert_wq_barrier(struct pool_workqueue *pwq,
2791 struct wq_barrier *barr,
2792 struct work_struct *target, struct worker *worker)
2794 unsigned int work_flags = 0;
2795 unsigned int work_color;
2796 struct list_head *head;
2799 * debugobject calls are safe here even with pool->lock locked
2800 * as we know for sure that this will not trigger any of the
2801 * checks and call back into the fixup functions where we
2804 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2805 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2807 init_completion_map(&barr->done, &target->lockdep_map);
2809 barr->task = current;
2811 /* The barrier work item does not participate in pwq->nr_active. */
2812 work_flags |= WORK_STRUCT_INACTIVE;
2815 * If @target is currently being executed, schedule the
2816 * barrier to the worker; otherwise, put it after @target.
2819 head = worker->scheduled.next;
2820 work_color = worker->current_color;
2822 unsigned long *bits = work_data_bits(target);
2824 head = target->entry.next;
2825 /* there can already be other linked works, inherit and set */
2826 work_flags |= *bits & WORK_STRUCT_LINKED;
2827 work_color = get_work_color(*bits);
2828 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2831 pwq->nr_in_flight[work_color]++;
2832 work_flags |= work_color_to_flags(work_color);
2834 debug_work_activate(&barr->work);
2835 insert_work(pwq, &barr->work, head, work_flags);
2839 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2840 * @wq: workqueue being flushed
2841 * @flush_color: new flush color, < 0 for no-op
2842 * @work_color: new work color, < 0 for no-op
2844 * Prepare pwqs for workqueue flushing.
2846 * If @flush_color is non-negative, flush_color on all pwqs should be
2847 * -1. If no pwq has in-flight commands at the specified color, all
2848 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2849 * has in flight commands, its pwq->flush_color is set to
2850 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2851 * wakeup logic is armed and %true is returned.
2853 * The caller should have initialized @wq->first_flusher prior to
2854 * calling this function with non-negative @flush_color. If
2855 * @flush_color is negative, no flush color update is done and %false
2858 * If @work_color is non-negative, all pwqs should have the same
2859 * work_color which is previous to @work_color and all will be
2860 * advanced to @work_color.
2863 * mutex_lock(wq->mutex).
2866 * %true if @flush_color >= 0 and there's something to flush. %false
2869 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2870 int flush_color, int work_color)
2873 struct pool_workqueue *pwq;
2875 if (flush_color >= 0) {
2876 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2877 atomic_set(&wq->nr_pwqs_to_flush, 1);
2880 for_each_pwq(pwq, wq) {
2881 struct worker_pool *pool = pwq->pool;
2883 raw_spin_lock_irq(&pool->lock);
2885 if (flush_color >= 0) {
2886 WARN_ON_ONCE(pwq->flush_color != -1);
2888 if (pwq->nr_in_flight[flush_color]) {
2889 pwq->flush_color = flush_color;
2890 atomic_inc(&wq->nr_pwqs_to_flush);
2895 if (work_color >= 0) {
2896 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2897 pwq->work_color = work_color;
2900 raw_spin_unlock_irq(&pool->lock);
2903 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2904 complete(&wq->first_flusher->done);
2910 * __flush_workqueue - ensure that any scheduled work has run to completion.
2911 * @wq: workqueue to flush
2913 * This function sleeps until all work items which were queued on entry
2914 * have finished execution, but it is not livelocked by new incoming ones.
2916 void __flush_workqueue(struct workqueue_struct *wq)
2918 struct wq_flusher this_flusher = {
2919 .list = LIST_HEAD_INIT(this_flusher.list),
2921 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2925 if (WARN_ON(!wq_online))
2928 lock_map_acquire(&wq->lockdep_map);
2929 lock_map_release(&wq->lockdep_map);
2931 mutex_lock(&wq->mutex);
2934 * Start-to-wait phase
2936 next_color = work_next_color(wq->work_color);
2938 if (next_color != wq->flush_color) {
2940 * Color space is not full. The current work_color
2941 * becomes our flush_color and work_color is advanced
2944 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2945 this_flusher.flush_color = wq->work_color;
2946 wq->work_color = next_color;
2948 if (!wq->first_flusher) {
2949 /* no flush in progress, become the first flusher */
2950 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2952 wq->first_flusher = &this_flusher;
2954 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2956 /* nothing to flush, done */
2957 wq->flush_color = next_color;
2958 wq->first_flusher = NULL;
2963 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2964 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2965 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2969 * Oops, color space is full, wait on overflow queue.
2970 * The next flush completion will assign us
2971 * flush_color and transfer to flusher_queue.
2973 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2976 check_flush_dependency(wq, NULL);
2978 mutex_unlock(&wq->mutex);
2980 wait_for_completion(&this_flusher.done);
2983 * Wake-up-and-cascade phase
2985 * First flushers are responsible for cascading flushes and
2986 * handling overflow. Non-first flushers can simply return.
2988 if (READ_ONCE(wq->first_flusher) != &this_flusher)
2991 mutex_lock(&wq->mutex);
2993 /* we might have raced, check again with mutex held */
2994 if (wq->first_flusher != &this_flusher)
2997 WRITE_ONCE(wq->first_flusher, NULL);
2999 WARN_ON_ONCE(!list_empty(&this_flusher.list));
3000 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3003 struct wq_flusher *next, *tmp;
3005 /* complete all the flushers sharing the current flush color */
3006 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
3007 if (next->flush_color != wq->flush_color)
3009 list_del_init(&next->list);
3010 complete(&next->done);
3013 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
3014 wq->flush_color != work_next_color(wq->work_color));
3016 /* this flush_color is finished, advance by one */
3017 wq->flush_color = work_next_color(wq->flush_color);
3019 /* one color has been freed, handle overflow queue */
3020 if (!list_empty(&wq->flusher_overflow)) {
3022 * Assign the same color to all overflowed
3023 * flushers, advance work_color and append to
3024 * flusher_queue. This is the start-to-wait
3025 * phase for these overflowed flushers.
3027 list_for_each_entry(tmp, &wq->flusher_overflow, list)
3028 tmp->flush_color = wq->work_color;
3030 wq->work_color = work_next_color(wq->work_color);
3032 list_splice_tail_init(&wq->flusher_overflow,
3033 &wq->flusher_queue);
3034 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3037 if (list_empty(&wq->flusher_queue)) {
3038 WARN_ON_ONCE(wq->flush_color != wq->work_color);
3043 * Need to flush more colors. Make the next flusher
3044 * the new first flusher and arm pwqs.
3046 WARN_ON_ONCE(wq->flush_color == wq->work_color);
3047 WARN_ON_ONCE(wq->flush_color != next->flush_color);
3049 list_del_init(&next->list);
3050 wq->first_flusher = next;
3052 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
3056 * Meh... this color is already done, clear first
3057 * flusher and repeat cascading.
3059 wq->first_flusher = NULL;
3063 mutex_unlock(&wq->mutex);
3065 EXPORT_SYMBOL(__flush_workqueue);
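/*
 * Editor's illustrative sketch, not part of the kernel source: callers
 * normally use the flush_workqueue() wrapper, e.g. to make sure all
 * already-queued work has finished before releasing resources it relies
 * on.  "my_wq" and "obj" are hypothetical names.
 *
 *	queue_work(my_wq, &obj->work);
 *	...
 *	flush_workqueue(my_wq);
 *
 * After flush_workqueue() returns, every work item that was queued on
 * my_wq before the call has finished executing.
 */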
3068 * drain_workqueue - drain a workqueue
3069 * @wq: workqueue to drain
3071 * Wait until the workqueue becomes empty. While draining is in progress,
3072 * only chain queueing is allowed. IOW, only currently pending or running
3073 * work items on @wq can queue further work items on it. @wq is flushed
3074 * repeatedly until it becomes empty. The number of flushes is determined
3075 * by the depth of chaining and should be relatively short. Whine if it
3078 void drain_workqueue(struct workqueue_struct *wq)
3080 unsigned int flush_cnt = 0;
3081 struct pool_workqueue *pwq;
3084 * __queue_work() needs to test whether there are drainers, is much
3085 * hotter than drain_workqueue() and already looks at @wq->flags.
3086 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
3088 mutex_lock(&wq->mutex);
3089 if (!wq->nr_drainers++)
3090 wq->flags |= __WQ_DRAINING;
3091 mutex_unlock(&wq->mutex);
3093 __flush_workqueue(wq);
3095 mutex_lock(&wq->mutex);
3097 for_each_pwq(pwq, wq) {
3100 raw_spin_lock_irq(&pwq->pool->lock);
3101 drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
3102 raw_spin_unlock_irq(&pwq->pool->lock);
3107 if (++flush_cnt == 10 ||
3108 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
3109 pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
3110 wq->name, __func__, flush_cnt);
3112 mutex_unlock(&wq->mutex);
3116 if (!--wq->nr_drainers)
3117 wq->flags &= ~__WQ_DRAINING;
3118 mutex_unlock(&wq->mutex);
3120 EXPORT_SYMBOL_GPL(drain_workqueue);
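/*
 * Editor's illustrative sketch, not part of the kernel source:
 * drain_workqueue() is useful when queued work items may re-queue
 * themselves and the caller needs the queue to settle while keeping it
 * alive; destroy_workqueue() already drains internally.  "my_wq" is a
 * hypothetical workqueue pointer.
 *
 *	drain_workqueue(my_wq);
 */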
3122 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
3125 struct worker *worker = NULL;
3126 struct worker_pool *pool;
3127 struct pool_workqueue *pwq;
3132 pool = get_work_pool(work);
3138 raw_spin_lock_irq(&pool->lock);
3139 /* see the comment in try_to_grab_pending() with the same code */
3140 pwq = get_work_pwq(work);
3142 if (unlikely(pwq->pool != pool))
3145 worker = find_worker_executing_work(pool, work);
3148 pwq = worker->current_pwq;
3151 check_flush_dependency(pwq->wq, work);
3153 insert_wq_barrier(pwq, barr, work, worker);
3154 raw_spin_unlock_irq(&pool->lock);
3157 * Force a lock recursion deadlock when using flush_work() inside a
3158 * single-threaded or rescuer-equipped workqueue.
3160 * For single-threaded workqueues the deadlock happens when the work
3161 * is after the work issuing the flush_work(). For rescuer-equipped
3162 * workqueues the deadlock happens when the rescuer stalls, blocking
3166 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3167 lock_map_acquire(&pwq->wq->lockdep_map);
3168 lock_map_release(&pwq->wq->lockdep_map);
3173 raw_spin_unlock_irq(&pool->lock);
3178 static bool __flush_work(struct work_struct *work, bool from_cancel)
3180 struct wq_barrier barr;
3182 if (WARN_ON(!wq_online))
3185 if (WARN_ON(!work->func))
3188 lock_map_acquire(&work->lockdep_map);
3189 lock_map_release(&work->lockdep_map);
3191 if (start_flush_work(work, &barr, from_cancel)) {
3192 wait_for_completion(&barr.done);
3193 destroy_work_on_stack(&barr.work);
3201 * flush_work - wait for a work to finish executing the last queueing instance
3202 * @work: the work to flush
3204 * Wait until @work has finished execution. @work is guaranteed to be idle
3205 * on return if it hasn't been requeued since flush started.
3208 * %true if flush_work() waited for the work to finish execution,
3209 * %false if it was already idle.
3211 bool flush_work(struct work_struct *work)
3213 return __flush_work(work, false);
3215 EXPORT_SYMBOL_GPL(flush_work);
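/*
 * Editor's illustrative sketch, not part of the kernel source: flush a
 * specific work item before touching state it may still be using.
 * "obj" and its members are hypothetical.
 *
 *	queue_work(system_wq, &obj->work);
 *	...
 *	flush_work(&obj->work);
 *	kfree(obj->buffer);
 */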
3218 wait_queue_entry_t wait;
3219 struct work_struct *work;
3222 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3224 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3226 if (cwait->work != key)
3228 return autoremove_wake_function(wait, mode, sync, key);
3231 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3233 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3234 unsigned long flags;
3238 ret = try_to_grab_pending(work, is_dwork, &flags);
3240 * If someone else is already canceling, wait for it to
3241 * finish. flush_work() doesn't work for PREEMPT_NONE
3242 * because we may get scheduled between @work's completion
3243 * and the other canceling task resuming and clearing
3244 * CANCELING - flush_work() will return false immediately
3245 * as @work is no longer busy, try_to_grab_pending() will
3246 * return -ENOENT as @work is still being canceled and the
3247 * other canceling task won't be able to clear CANCELING as
3248 * we're hogging the CPU.
3250 * Let's wait for completion using a waitqueue. As this
3251 * may lead to the thundering herd problem, use a custom
3252 * wake function which matches @work along with exclusive
3255 if (unlikely(ret == -ENOENT)) {
3256 struct cwt_wait cwait;
3258 init_wait(&cwait.wait);
3259 cwait.wait.func = cwt_wakefn;
3262 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3263 TASK_UNINTERRUPTIBLE);
3264 if (work_is_canceling(work))
3266 finish_wait(&cancel_waitq, &cwait.wait);
3268 } while (unlikely(ret < 0));
3270 /* tell other tasks trying to grab @work to back off */
3271 mark_work_canceling(work);
3272 local_irq_restore(flags);
3275 * This allows canceling during early boot. We know that @work
3279 __flush_work(work, true);
3281 clear_work_data(work);
3284 * Paired with prepare_to_wait() above so that either
3285 * waitqueue_active() is visible here or !work_is_canceling() is
3289 if (waitqueue_active(&cancel_waitq))
3290 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3296 * cancel_work_sync - cancel a work and wait for it to finish
3297 * @work: the work to cancel
3299 * Cancel @work and wait for its execution to finish. This function
3300 * can be used even if the work re-queues itself or migrates to
3301 * another workqueue. On return from this function, @work is
3302 * guaranteed to be not pending or executing on any CPU.
3304 * cancel_work_sync(&delayed_work->work) must not be used for
3305 * delayed_work's. Use cancel_delayed_work_sync() instead.
3307 * The caller must ensure that the workqueue on which @work was last
3308 * queued can't be destroyed before this function returns.
3311 * %true if @work was pending, %false otherwise.
3313 bool cancel_work_sync(struct work_struct *work)
3315 return __cancel_work_timer(work, false);
3317 EXPORT_SYMBOL_GPL(cancel_work_sync);
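/*
 * Editor's illustrative sketch, not part of the kernel source: the usual
 * teardown pattern where the work item must be neither pending nor
 * running once the containing object is freed.  "foo" is hypothetical.
 *
 *	cancel_work_sync(&foo->work);
 *	kfree(foo);
 */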
3320 * flush_delayed_work - wait for a dwork to finish executing the last queueing
3321 * @dwork: the delayed work to flush
3323 * Delayed timer is cancelled and the pending work is queued for
3324 * immediate execution. Like flush_work(), this function only
3325 * considers the last queueing instance of @dwork.
3328 * %true if flush_work() waited for the work to finish execution,
3329 * %false if it was already idle.
3331 bool flush_delayed_work(struct delayed_work *dwork)
3333 local_irq_disable();
3334 if (del_timer_sync(&dwork->timer))
3335 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3337 return flush_work(&dwork->work);
3339 EXPORT_SYMBOL(flush_delayed_work);
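/*
 * Editor's illustrative sketch, not part of the kernel source: push a
 * pending delayed work item out immediately and wait for it, e.g. to
 * write back buffered state before suspend.  "foo" and its members are
 * hypothetical.
 *
 *	mod_delayed_work(system_wq, &foo->writeback_dwork, 10 * HZ);
 *	...
 *	flush_delayed_work(&foo->writeback_dwork);
 */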
3342 * flush_rcu_work - wait for a rwork to finish executing the last queueing
3343 * @rwork: the rcu work to flush
3346 * %true if flush_rcu_work() waited for the work to finish execution,
3347 * %false if it was already idle.
3349 bool flush_rcu_work(struct rcu_work *rwork)
3351 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3353 flush_work(&rwork->work);
3356 return flush_work(&rwork->work);
3359 EXPORT_SYMBOL(flush_rcu_work);
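/*
 * Editor's illustrative sketch, not part of the kernel source: wait for a
 * previously queued rcu_work, including the grace period if the item was
 * still pending.  "foo" is hypothetical.
 *
 *	queue_rcu_work(system_wq, &foo->rwork);
 *	...
 *	flush_rcu_work(&foo->rwork);
 */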
3361 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3363 unsigned long flags;
3367 ret = try_to_grab_pending(work, is_dwork, &flags);
3368 } while (unlikely(ret == -EAGAIN));
3370 if (unlikely(ret < 0))
3373 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3374 local_irq_restore(flags);
3379 * See cancel_delayed_work()
3381 bool cancel_work(struct work_struct *work)
3383 return __cancel_work(work, false);
3385 EXPORT_SYMBOL(cancel_work);
3388 * cancel_delayed_work - cancel a delayed work
3389 * @dwork: delayed_work to cancel
3391 * Kill off a pending delayed_work.
3393 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3397 * The work callback function may still be running on return, unless
3398 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
3399 * use cancel_delayed_work_sync() to wait on it.
3401 * This function is safe to call from any context including IRQ handler.
3403 bool cancel_delayed_work(struct delayed_work *dwork)
3405 return __cancel_work(&dwork->work, true);
3407 EXPORT_SYMBOL(cancel_delayed_work);
3410 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3411 * @dwork: the delayed work to cancel
3413 * This is cancel_work_sync() for delayed works.
3416 * %true if @dwork was pending, %false otherwise.
3418 bool cancel_delayed_work_sync(struct delayed_work *dwork)
3420 return __cancel_work_timer(&dwork->work, true);
3422 EXPORT_SYMBOL(cancel_delayed_work_sync);
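/*
 * Editor's illustrative sketch, not part of the kernel source: a periodic,
 * self-rearming delayed work is stopped with the _sync variant so it
 * cannot rearm behind the caller's back.  Names are hypothetical.
 *
 *	static void poll_workfn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		...
 *		schedule_delayed_work(dwork, HZ);
 *	}
 *
 *	cancel_delayed_work_sync(&foo->poll_dwork);
 */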
3425 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3426 * @func: the function to call
3428 * schedule_on_each_cpu() executes @func on each online CPU using the
3429 * system workqueue and blocks until all CPUs have completed.
3430 * schedule_on_each_cpu() is very slow.
3433 * 0 on success, -errno on failure.
3435 int schedule_on_each_cpu(work_func_t func)
3438 struct work_struct __percpu *works;
3440 works = alloc_percpu(struct work_struct);
3446 for_each_online_cpu(cpu) {
3447 struct work_struct *work = per_cpu_ptr(works, cpu);
3449 INIT_WORK(work, func);
3450 schedule_work_on(cpu, work);
3453 for_each_online_cpu(cpu)
3454 flush_work(per_cpu_ptr(works, cpu));
3462 * execute_in_process_context - reliably execute the routine with user context
3463 * @fn: the function to execute
3464 * @ew: guaranteed storage for the execute work structure (must
3465 * be available when the work executes)
3467 * Executes the function immediately if process context is available,
3468 * otherwise schedules the function for delayed execution.
3470 * Return: 0 - function was executed
3471 * 1 - function was scheduled for execution
3473 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3475 if (!in_interrupt()) {
3480 INIT_WORK(&ew->work, fn);
3481 schedule_work(&ew->work);
3485 EXPORT_SYMBOL_GPL(execute_in_process_context);
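/*
 * Editor's illustrative sketch, not part of the kernel source: run a
 * release routine directly when already in process context, otherwise
 * defer it to the system workqueue.  "obj_release_workfn" and "obj->ew"
 * are hypothetical.
 *
 *	execute_in_process_context(obj_release_workfn, &obj->ew);
 */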
3488 * free_workqueue_attrs - free a workqueue_attrs
3489 * @attrs: workqueue_attrs to free
3491 * Undo alloc_workqueue_attrs().
3493 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3496 free_cpumask_var(attrs->cpumask);
3502 * alloc_workqueue_attrs - allocate a workqueue_attrs
3504 * Allocate a new workqueue_attrs, initialize with default settings and
3507 * Return: The allocated new workqueue_attrs on success. %NULL on failure.
3509 struct workqueue_attrs *alloc_workqueue_attrs(void)
3511 struct workqueue_attrs *attrs;
3513 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3516 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3519 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3522 free_workqueue_attrs(attrs);
3526 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3527 const struct workqueue_attrs *from)
3529 to->nice = from->nice;
3530 cpumask_copy(to->cpumask, from->cpumask);
3532 * Unlike hash and equality test, this function doesn't ignore
3533 * ->no_numa as it is used for both pool and wq attrs. Instead,
3534 * get_unbound_pool() explicitly clears ->no_numa after copying.
3536 to->no_numa = from->no_numa;
3539 /* hash value of the content of @attr */
3540 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3544 hash = jhash_1word(attrs->nice, hash);
3545 hash = jhash(cpumask_bits(attrs->cpumask),
3546 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3550 /* content equality test */
3551 static bool wqattrs_equal(const struct workqueue_attrs *a,
3552 const struct workqueue_attrs *b)
3554 if (a->nice != b->nice)
3556 if (!cpumask_equal(a->cpumask, b->cpumask))
3562 * init_worker_pool - initialize a newly zalloc'd worker_pool
3563 * @pool: worker_pool to initialize
3565 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3567 * Return: 0 on success, -errno on failure. Even on failure, all fields
3568 * inside @pool proper are initialized and put_unbound_pool() can be called
3569 * on @pool safely to release it.
3571 static int init_worker_pool(struct worker_pool *pool)
3573 raw_spin_lock_init(&pool->lock);
3576 pool->node = NUMA_NO_NODE;
3577 pool->flags |= POOL_DISASSOCIATED;
3578 pool->watchdog_ts = jiffies;
3579 INIT_LIST_HEAD(&pool->worklist);
3580 INIT_LIST_HEAD(&pool->idle_list);
3581 hash_init(pool->busy_hash);
3583 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3584 INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
3586 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3588 INIT_LIST_HEAD(&pool->workers);
3589 INIT_LIST_HEAD(&pool->dying_workers);
3591 ida_init(&pool->worker_ida);
3592 INIT_HLIST_NODE(&pool->hash_node);
3595 /* shouldn't fail above this point */
3596 pool->attrs = alloc_workqueue_attrs();
3602 #ifdef CONFIG_LOCKDEP
3603 static void wq_init_lockdep(struct workqueue_struct *wq)
3607 lockdep_register_key(&wq->key);
3608 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3610 lock_name = wq->name;
3612 wq->lock_name = lock_name;
3613 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3616 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3618 lockdep_unregister_key(&wq->key);
3621 static void wq_free_lockdep(struct workqueue_struct *wq)
3623 if (wq->lock_name != wq->name)
3624 kfree(wq->lock_name);
3627 static void wq_init_lockdep(struct workqueue_struct *wq)
3631 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3635 static void wq_free_lockdep(struct workqueue_struct *wq)
3640 static void rcu_free_wq(struct rcu_head *rcu)
3642 struct workqueue_struct *wq =
3643 container_of(rcu, struct workqueue_struct, rcu);
3645 wq_free_lockdep(wq);
3647 if (!(wq->flags & WQ_UNBOUND))
3648 free_percpu(wq->cpu_pwqs);
3650 free_workqueue_attrs(wq->unbound_attrs);
3655 static void rcu_free_pool(struct rcu_head *rcu)
3657 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3659 ida_destroy(&pool->worker_ida);
3660 free_workqueue_attrs(pool->attrs);
3665 * put_unbound_pool - put a worker_pool
3666 * @pool: worker_pool to put
3668 * Put @pool. If its refcnt reaches zero, it gets destroyed in an
3669 * RCU-safe manner. get_unbound_pool() calls this function on its failure path
3670 * and this function should be able to release pools which went through
3671 * init_worker_pool(), successfully or not.
3673 * Should be called with wq_pool_mutex held.
3675 static void put_unbound_pool(struct worker_pool *pool)
3677 DECLARE_COMPLETION_ONSTACK(detach_completion);
3678 struct list_head cull_list;
3679 struct worker *worker;
3681 INIT_LIST_HEAD(&cull_list);
3683 lockdep_assert_held(&wq_pool_mutex);
3689 if (WARN_ON(!(pool->cpu < 0)) ||
3690 WARN_ON(!list_empty(&pool->worklist)))
3693 /* release id and unhash */
3695 idr_remove(&worker_pool_idr, pool->id);
3696 hash_del(&pool->hash_node);
3699 * Become the manager and destroy all workers. This prevents
3700 * @pool's workers from blocking on attach_mutex. We're the last
3701 * manager and @pool gets freed with the flag set.
3703 * Having a concurrent manager is quite unlikely to happen as we can
3704 * only get here with
3705 * pwq->refcnt == pool->refcnt == 0
3706 * which implies no work queued to the pool, which implies no worker can
3707 * become the manager. However a worker could have taken the role of
3708 * manager before the refcnts dropped to 0, since maybe_create_worker()
3712 rcuwait_wait_event(&manager_wait,
3713 !(pool->flags & POOL_MANAGER_ACTIVE),
3714 TASK_UNINTERRUPTIBLE);
3716 mutex_lock(&wq_pool_attach_mutex);
3717 raw_spin_lock_irq(&pool->lock);
3718 if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
3719 pool->flags |= POOL_MANAGER_ACTIVE;
3722 raw_spin_unlock_irq(&pool->lock);
3723 mutex_unlock(&wq_pool_attach_mutex);
3726 while ((worker = first_idle_worker(pool)))
3727 set_worker_dying(worker, &cull_list);
3728 WARN_ON(pool->nr_workers || pool->nr_idle);
3729 raw_spin_unlock_irq(&pool->lock);
3731 wake_dying_workers(&cull_list);
3733 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
3734 pool->detach_completion = &detach_completion;
3735 mutex_unlock(&wq_pool_attach_mutex);
3737 if (pool->detach_completion)
3738 wait_for_completion(pool->detach_completion);
3740 /* shut down the timers */
3741 del_timer_sync(&pool->idle_timer);
3742 cancel_work_sync(&pool->idle_cull_work);
3743 del_timer_sync(&pool->mayday_timer);
3745 /* RCU protected to allow dereferences from get_work_pool() */
3746 call_rcu(&pool->rcu, rcu_free_pool);
3750 * get_unbound_pool - get a worker_pool with the specified attributes
3751 * @attrs: the attributes of the worker_pool to get
3753 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3754 * reference count and return it. If there already is a matching
3755 * worker_pool, it will be used; otherwise, this function attempts to
3758 * Should be called with wq_pool_mutex held.
3760 * Return: On success, a worker_pool with the same attributes as @attrs.
3761 * On failure, %NULL.
3763 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3765 u32 hash = wqattrs_hash(attrs);
3766 struct worker_pool *pool;
3768 int target_node = NUMA_NO_NODE;
3770 lockdep_assert_held(&wq_pool_mutex);
3772 /* do we already have a matching pool? */
3773 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3774 if (wqattrs_equal(pool->attrs, attrs)) {
3780 /* if cpumask is contained inside a NUMA node, we belong to that node */
3781 if (wq_numa_enabled) {
3782 for_each_node(node) {
3783 if (cpumask_subset(attrs->cpumask,
3784 wq_numa_possible_cpumask[node])) {
3791 /* nope, create a new one */
3792 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3793 if (!pool || init_worker_pool(pool) < 0)
3796 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3797 copy_workqueue_attrs(pool->attrs, attrs);
3798 pool->node = target_node;
3801 * no_numa isn't a worker_pool attribute, always clear it. See
3802 * 'struct workqueue_attrs' comments for detail.
3804 pool->attrs->no_numa = false;
3806 if (worker_pool_assign_id(pool) < 0)
3809 /* create and start the initial worker */
3810 if (wq_online && !create_worker(pool))
3814 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3819 put_unbound_pool(pool);
3823 static void rcu_free_pwq(struct rcu_head *rcu)
3825 kmem_cache_free(pwq_cache,
3826 container_of(rcu, struct pool_workqueue, rcu));
3830 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3831 * and needs to be destroyed.
3833 static void pwq_unbound_release_workfn(struct work_struct *work)
3835 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3836 unbound_release_work);
3837 struct workqueue_struct *wq = pwq->wq;
3838 struct worker_pool *pool = pwq->pool;
3839 bool is_last = false;
3842 * When @pwq is not linked, it doesn't hold any reference to @wq,
3843 * and @wq may be invalid to access.
3845 if (!list_empty(&pwq->pwqs_node)) {
3846 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3849 mutex_lock(&wq->mutex);
3850 list_del_rcu(&pwq->pwqs_node);
3851 is_last = list_empty(&wq->pwqs);
3852 mutex_unlock(&wq->mutex);
3855 mutex_lock(&wq_pool_mutex);
3856 put_unbound_pool(pool);
3857 mutex_unlock(&wq_pool_mutex);
3859 call_rcu(&pwq->rcu, rcu_free_pwq);
3862 * If we're the last pwq going away, @wq is already dead and no one
3863 * is gonna access it anymore. Schedule RCU free.
3866 wq_unregister_lockdep(wq);
3867 call_rcu(&wq->rcu, rcu_free_wq);
3872 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3873 * @pwq: target pool_workqueue
3875 * If @pwq isn't freezing, set @pwq->max_active to the associated
3876 * workqueue's saved_max_active and activate inactive work items
3877 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
3879 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3881 struct workqueue_struct *wq = pwq->wq;
3882 bool freezable = wq->flags & WQ_FREEZABLE;
3883 unsigned long flags;
3885 /* for @wq->saved_max_active */
3886 lockdep_assert_held(&wq->mutex);
3888 /* fast exit for non-freezable wqs */
3889 if (!freezable && pwq->max_active == wq->saved_max_active)
3892 /* this function can be called during early boot w/ irq disabled */
3893 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
3896 * During [un]freezing, the caller is responsible for ensuring that
3897 * this function is called at least once after @workqueue_freezing
3898 * is updated and visible.
3900 if (!freezable || !workqueue_freezing) {
3903 pwq->max_active = wq->saved_max_active;
3905 while (!list_empty(&pwq->inactive_works) &&
3906 pwq->nr_active < pwq->max_active) {
3907 pwq_activate_first_inactive(pwq);
3912 * Need to kick a worker after thawing or after an unbound wq's
3913 * max_active is bumped. In realtime scenarios, always kicking a
3914 * worker would cause interference on isolated CPU cores, so
3915 * only kick if work items were actually activated.
3918 wake_up_worker(pwq->pool);
3920 pwq->max_active = 0;
3923 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
3926 /* initialize newly allocated @pwq which is associated with @wq and @pool */
3927 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3928 struct worker_pool *pool)
3930 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3932 memset(pwq, 0, sizeof(*pwq));
3936 pwq->flush_color = -1;
3938 INIT_LIST_HEAD(&pwq->inactive_works);
3939 INIT_LIST_HEAD(&pwq->pwqs_node);
3940 INIT_LIST_HEAD(&pwq->mayday_node);
3941 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3944 /* sync @pwq with the current state of its associated wq and link it */
3945 static void link_pwq(struct pool_workqueue *pwq)
3947 struct workqueue_struct *wq = pwq->wq;
3949 lockdep_assert_held(&wq->mutex);
3951 /* may be called multiple times, ignore if already linked */
3952 if (!list_empty(&pwq->pwqs_node))
3955 /* set the matching work_color */
3956 pwq->work_color = wq->work_color;
3958 /* sync max_active to the current setting */
3959 pwq_adjust_max_active(pwq);
3962 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3965 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3966 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3967 const struct workqueue_attrs *attrs)
3969 struct worker_pool *pool;
3970 struct pool_workqueue *pwq;
3972 lockdep_assert_held(&wq_pool_mutex);
3974 pool = get_unbound_pool(attrs);
3978 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3980 put_unbound_pool(pool);
3984 init_pwq(pwq, wq, pool);
3989 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3990 * @attrs: the wq_attrs of the default pwq of the target workqueue
3991 * @node: the target NUMA node
3992 * @cpu_going_down: if >= 0, the CPU to consider as offline
3993 * @cpumask: outarg, the resulting cpumask
3995 * Calculate the cpumask a workqueue with @attrs should use on @node. If
3996 * @cpu_going_down is >= 0, that cpu is considered offline during
3997 * calculation. The result is stored in @cpumask.
3999 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
4000 * enabled and @node has online CPUs requested by @attrs, the returned
4001 * cpumask is the intersection of the possible CPUs of @node and
4004 * The caller is responsible for ensuring that the cpumask of @node stays
4007 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
4010 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
4011 int cpu_going_down, cpumask_t *cpumask)
4013 if (!wq_numa_enabled || attrs->no_numa)
4016 /* does @node have any online CPUs @attrs wants? */
4017 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
4018 if (cpu_going_down >= 0)
4019 cpumask_clear_cpu(cpu_going_down, cpumask);
4021 if (cpumask_empty(cpumask))
4024 /* yeap, return possible CPUs in @node that @attrs wants */
4025 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
4027 if (cpumask_empty(cpumask)) {
4028 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
4029 "possible intersect\n");
4033 return !cpumask_equal(cpumask, attrs->cpumask);
4036 cpumask_copy(cpumask, attrs->cpumask);
4040 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
4041 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
4043 struct pool_workqueue *pwq)
4045 struct pool_workqueue *old_pwq;
4047 lockdep_assert_held(&wq_pool_mutex);
4048 lockdep_assert_held(&wq->mutex);
4050 /* link_pwq() can handle duplicate calls */
4053 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4054 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
4058 /* context to store the prepared attrs & pwqs before applying */
4059 struct apply_wqattrs_ctx {
4060 struct workqueue_struct *wq; /* target workqueue */
4061 struct workqueue_attrs *attrs; /* attrs to apply */
4062 struct list_head list; /* queued for batching commit */
4063 struct pool_workqueue *dfl_pwq;
4064 struct pool_workqueue *pwq_tbl[];
4067 /* free the resources after success or abort */
4068 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
4074 put_pwq_unlocked(ctx->pwq_tbl[node]);
4075 put_pwq_unlocked(ctx->dfl_pwq);
4077 free_workqueue_attrs(ctx->attrs);
4083 /* allocate the attrs and pwqs for later installation */
4084 static struct apply_wqattrs_ctx *
4085 apply_wqattrs_prepare(struct workqueue_struct *wq,
4086 const struct workqueue_attrs *attrs,
4087 const cpumask_var_t unbound_cpumask)
4089 struct apply_wqattrs_ctx *ctx;
4090 struct workqueue_attrs *new_attrs, *tmp_attrs;
4093 lockdep_assert_held(&wq_pool_mutex);
4095 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
4097 new_attrs = alloc_workqueue_attrs();
4098 tmp_attrs = alloc_workqueue_attrs();
4099 if (!ctx || !new_attrs || !tmp_attrs)
4103	 * Calculate the attrs of the default pwq with unbound_cpumask, which is
4104	 * either wq_unbound_cpumask or the mask about to be set as wq_unbound_cpumask.
4105	 * If the user-configured cpumask doesn't overlap with
4106	 * unbound_cpumask, we fall back to unbound_cpumask.
4108 copy_workqueue_attrs(new_attrs, attrs);
4109 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, unbound_cpumask);
4110 if (unlikely(cpumask_empty(new_attrs->cpumask)))
4111 cpumask_copy(new_attrs->cpumask, unbound_cpumask);
4114 * We may create multiple pwqs with differing cpumasks. Make a
4115 * copy of @new_attrs which will be modified and used to obtain
4118 copy_workqueue_attrs(tmp_attrs, new_attrs);
4121 * If something goes wrong during CPU up/down, we'll fall back to
4122 * the default pwq covering whole @attrs->cpumask. Always create
4123 * it even if we don't use it immediately.
4125 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
4129 for_each_node(node) {
4130 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
4131 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
4132 if (!ctx->pwq_tbl[node])
4135 ctx->dfl_pwq->refcnt++;
4136 ctx->pwq_tbl[node] = ctx->dfl_pwq;
4140 /* save the user configured attrs and sanitize it. */
4141 copy_workqueue_attrs(new_attrs, attrs);
4142 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
4143 ctx->attrs = new_attrs;
4146 free_workqueue_attrs(tmp_attrs);
4150 free_workqueue_attrs(tmp_attrs);
4151 free_workqueue_attrs(new_attrs);
4152 apply_wqattrs_cleanup(ctx);
4156 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
4157 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4161 /* all pwqs have been created successfully, let's install'em */
4162 mutex_lock(&ctx->wq->mutex);
4164 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4166 /* save the previous pwq and install the new one */
4168 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
4169 ctx->pwq_tbl[node]);
4171 /* @dfl_pwq might not have been used, ensure it's linked */
4172 link_pwq(ctx->dfl_pwq);
4173 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
4175 mutex_unlock(&ctx->wq->mutex);
4178 static void apply_wqattrs_lock(void)
4180 /* CPUs should stay stable across pwq creations and installations */
4182 mutex_lock(&wq_pool_mutex);
4185 static void apply_wqattrs_unlock(void)
4187 mutex_unlock(&wq_pool_mutex);
4191 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4192 const struct workqueue_attrs *attrs)
4194 struct apply_wqattrs_ctx *ctx;
4196 /* only unbound workqueues can change attributes */
4197 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4200 /* creating multiple pwqs breaks ordering guarantee */
4201 if (!list_empty(&wq->pwqs)) {
4202 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4205 wq->flags &= ~__WQ_ORDERED;
4208 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
4212 /* the ctx has been prepared successfully, let's commit it */
4213 apply_wqattrs_commit(ctx);
4214 apply_wqattrs_cleanup(ctx);
4220 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4221 * @wq: the target workqueue
4222 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4224 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
4225 * machines, this function maps a separate pwq to each NUMA node with
4226 * possible CPUs in @attrs->cpumask so that work items are affine to the
4227 * NUMA node they were issued on. Older pwqs are released as in-flight work
4228 * items finish. Note that a work item which repeatedly requeues itself
4229 * back-to-back will stay on its current pwq.
4231 * Performs GFP_KERNEL allocations.
4233 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
4235 * Return: 0 on success and -errno on failure.
4237 int apply_workqueue_attrs(struct workqueue_struct *wq,
4238 const struct workqueue_attrs *attrs)
4242 lockdep_assert_cpus_held();
4244 mutex_lock(&wq_pool_mutex);
4245 ret = apply_workqueue_attrs_locked(wq, attrs);
4246 mutex_unlock(&wq_pool_mutex);
4252 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
4253 * @wq: the target workqueue
4254 * @cpu: the CPU coming up or going down
4255 * @online: whether @cpu is coming up or going down
4257 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4258 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
4261 * If NUMA affinity can't be adjusted due to memory allocation failure, it
4262 * falls back to @wq->dfl_pwq which may not be optimal but is always
4265 * Note that when the last allowed CPU of a NUMA node goes offline for a
4266 * workqueue with a cpumask spanning multiple nodes, the workers which were
4267 * already executing the work items for the workqueue will lose their CPU
4268 * affinity and may execute on any CPU. This is similar to how per-cpu
4269 * workqueues behave on CPU_DOWN. If a workqueue user wants strict
4270 * affinity, it's the user's responsibility to flush the work item from
4273 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4276 int node = cpu_to_node(cpu);
4277 int cpu_off = online ? -1 : cpu;
4278 struct pool_workqueue *old_pwq = NULL, *pwq;
4279 struct workqueue_attrs *target_attrs;
4282 lockdep_assert_held(&wq_pool_mutex);
4284 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4285 wq->unbound_attrs->no_numa)
4289 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4290 * Let's use a preallocated one. The following buf is protected by
4291 * CPU hotplug exclusion.
4293 target_attrs = wq_update_unbound_numa_attrs_buf;
4294 cpumask = target_attrs->cpumask;
4296 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4297 pwq = unbound_pwq_by_node(wq, node);
4300 * Let's determine what needs to be done. If the target cpumask is
4301 * different from the default pwq's, we need to compare it to @pwq's
4302 * and create a new one if they don't match. If the target cpumask
4303 * equals the default pwq's, the default pwq should be used.
4305 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4306 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4312 /* create a new pwq */
4313 pwq = alloc_unbound_pwq(wq, target_attrs);
4315 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4320 /* Install the new pwq. */
4321 mutex_lock(&wq->mutex);
4322 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4326 mutex_lock(&wq->mutex);
4327 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4328 get_pwq(wq->dfl_pwq);
4329 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4330 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4332 mutex_unlock(&wq->mutex);
4333 put_pwq_unlocked(old_pwq);
4336 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4338 bool highpri = wq->flags & WQ_HIGHPRI;
4341 if (!(wq->flags & WQ_UNBOUND)) {
4342 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4346 for_each_possible_cpu(cpu) {
4347 struct pool_workqueue *pwq =
4348 per_cpu_ptr(wq->cpu_pwqs, cpu);
4349 struct worker_pool *cpu_pools =
4350 per_cpu(cpu_worker_pools, cpu);
4352 init_pwq(pwq, wq, &cpu_pools[highpri]);
4354 mutex_lock(&wq->mutex);
4356 mutex_unlock(&wq->mutex);
4362 if (wq->flags & __WQ_ORDERED) {
4363 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4364 /* there should only be single pwq for ordering guarantee */
4365 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4366 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4367 "ordering guarantee broken for workqueue %s\n", wq->name);
4369 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4376 static int wq_clamp_max_active(int max_active, unsigned int flags,
4379 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4381 if (max_active < 1 || max_active > lim)
4382 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4383 max_active, name, 1, lim);
4385 return clamp_val(max_active, 1, lim);
4389 * Workqueues which may be used during memory reclaim should have a rescuer
4390 * to guarantee forward progress.
4392 static int init_rescuer(struct workqueue_struct *wq)
4394 struct worker *rescuer;
4397 if (!(wq->flags & WQ_MEM_RECLAIM))
4400 rescuer = alloc_worker(NUMA_NO_NODE);
4402 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
4407 rescuer->rescue_wq = wq;
4408 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4409 if (IS_ERR(rescuer->task)) {
4410 ret = PTR_ERR(rescuer->task);
4411 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
4412 wq->name, ERR_PTR(ret));
4417 wq->rescuer = rescuer;
4418 kthread_bind_mask(rescuer->task, cpu_possible_mask);
4419 wake_up_process(rescuer->task);
4425 struct workqueue_struct *alloc_workqueue(const char *fmt,
4427 int max_active, ...)
4429 size_t tbl_size = 0;
4431 struct workqueue_struct *wq;
4432 struct pool_workqueue *pwq;
4435 * Unbound && max_active == 1 used to imply ordered, which is no
4436 * longer the case on NUMA machines due to per-node pools. While
4437 * alloc_ordered_workqueue() is the right way to create an ordered
4438 * workqueue, keep the previous behavior to avoid subtle breakages
4441 if ((flags & WQ_UNBOUND) && max_active == 1)
4442 flags |= __WQ_ORDERED;
4444 /* see the comment above the definition of WQ_POWER_EFFICIENT */
4445 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4446 flags |= WQ_UNBOUND;
4448 /* allocate wq and format name */
4449 if (flags & WQ_UNBOUND)
4450 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4452 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4456 if (flags & WQ_UNBOUND) {
4457 wq->unbound_attrs = alloc_workqueue_attrs();
4458 if (!wq->unbound_attrs)
4462 va_start(args, max_active);
4463 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4466 max_active = max_active ?: WQ_DFL_ACTIVE;
4467 max_active = wq_clamp_max_active(max_active, flags, wq->name);
4471 wq->saved_max_active = max_active;
4472 mutex_init(&wq->mutex);
4473 atomic_set(&wq->nr_pwqs_to_flush, 0);
4474 INIT_LIST_HEAD(&wq->pwqs);
4475 INIT_LIST_HEAD(&wq->flusher_queue);
4476 INIT_LIST_HEAD(&wq->flusher_overflow);
4477 INIT_LIST_HEAD(&wq->maydays);
4479 wq_init_lockdep(wq);
4480 INIT_LIST_HEAD(&wq->list);
4482 if (alloc_and_link_pwqs(wq) < 0)
4483 goto err_unreg_lockdep;
4485 if (wq_online && init_rescuer(wq) < 0)
4488 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4492 * wq_pool_mutex protects global freeze state and workqueues list.
4493 * Grab it, adjust max_active and add the new @wq to workqueues
4496 mutex_lock(&wq_pool_mutex);
4498 mutex_lock(&wq->mutex);
4499 for_each_pwq(pwq, wq)
4500 pwq_adjust_max_active(pwq);
4501 mutex_unlock(&wq->mutex);
4503 list_add_tail_rcu(&wq->list, &workqueues);
4505 mutex_unlock(&wq_pool_mutex);
4510 wq_unregister_lockdep(wq);
4511 wq_free_lockdep(wq);
4513 free_workqueue_attrs(wq->unbound_attrs);
4517 destroy_workqueue(wq);
4520 EXPORT_SYMBOL_GPL(alloc_workqueue);
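/*
 * Illustrative sketch (not part of this file): a typical caller allocates a
 * dedicated workqueue, queues work on it and tears it down on exit. The
 * names example_wq, example_work and example_workfn are hypothetical.
 *
 *	static struct workqueue_struct *example_wq;
 *	static void example_workfn(struct work_struct *work) { ... }
 *	static DECLARE_WORK(example_work, example_workfn);
 *
 *	example_wq = alloc_workqueue("example", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!example_wq)
 *		return -ENOMEM;
 *	queue_work(example_wq, &example_work);
 *	...
 *	destroy_workqueue(example_wq);
 *
 * WQ_MEM_RECLAIM is only needed if the workqueue may be used during memory
 * reclaim; passing 0 for max_active selects the default (WQ_DFL_ACTIVE).
 */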
4522 static bool pwq_busy(struct pool_workqueue *pwq)
4526 for (i = 0; i < WORK_NR_COLORS; i++)
4527 if (pwq->nr_in_flight[i])
4530 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4532 if (pwq->nr_active || !list_empty(&pwq->inactive_works))
4539 * destroy_workqueue - safely terminate a workqueue
4540 * @wq: target workqueue
4542 * Safely destroy a workqueue. All work currently pending will be done first.
4544 void destroy_workqueue(struct workqueue_struct *wq)
4546 struct pool_workqueue *pwq;
4550 * Remove it from sysfs first so that sanity check failure doesn't
4551 * lead to sysfs name conflicts.
4553 workqueue_sysfs_unregister(wq);
4555	/* mark that destruction of the workqueue is in progress */
4556 mutex_lock(&wq->mutex);
4557 wq->flags |= __WQ_DESTROYING;
4558 mutex_unlock(&wq->mutex);
4560 /* drain it before proceeding with destruction */
4561 drain_workqueue(wq);
4563 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4565 struct worker *rescuer = wq->rescuer;
4567 /* this prevents new queueing */
4568 raw_spin_lock_irq(&wq_mayday_lock);
4570 raw_spin_unlock_irq(&wq_mayday_lock);
4572 /* rescuer will empty maydays list before exiting */
4573 kthread_stop(rescuer->task);
4578 * Sanity checks - grab all the locks so that we wait for all
4579 * in-flight operations which may do put_pwq().
4581 mutex_lock(&wq_pool_mutex);
4582 mutex_lock(&wq->mutex);
4583 for_each_pwq(pwq, wq) {
4584 raw_spin_lock_irq(&pwq->pool->lock);
4585 if (WARN_ON(pwq_busy(pwq))) {
4586 pr_warn("%s: %s has the following busy pwq\n",
4587 __func__, wq->name);
4589 raw_spin_unlock_irq(&pwq->pool->lock);
4590 mutex_unlock(&wq->mutex);
4591 mutex_unlock(&wq_pool_mutex);
4592 show_one_workqueue(wq);
4595 raw_spin_unlock_irq(&pwq->pool->lock);
4597 mutex_unlock(&wq->mutex);
4600 * wq list is used to freeze wq, remove from list after
4601 * flushing is complete in case freeze races us.
4603 list_del_rcu(&wq->list);
4604 mutex_unlock(&wq_pool_mutex);
4606 if (!(wq->flags & WQ_UNBOUND)) {
4607 wq_unregister_lockdep(wq);
4609 * The base ref is never dropped on per-cpu pwqs. Directly
4610 * schedule RCU free.
4612 call_rcu(&wq->rcu, rcu_free_wq);
4615 * We're the sole accessor of @wq at this point. Directly
4616 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4617 * @wq will be freed when the last pwq is released.
4619 for_each_node(node) {
4620 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4621 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4622 put_pwq_unlocked(pwq);
4626 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
4627 * put. Don't access it afterwards.
4631 put_pwq_unlocked(pwq);
4634 EXPORT_SYMBOL_GPL(destroy_workqueue);
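/*
 * Illustrative teardown sketch (hypothetical names): callers commonly stop
 * the sources of new work and cancel delayed items before destruction so
 * the drain performed by destroy_workqueue() has nothing left to wait for.
 *
 *	cancel_delayed_work_sync(&example_dwork);
 *	destroy_workqueue(example_wq);
 */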
4637 * workqueue_set_max_active - adjust max_active of a workqueue
4638 * @wq: target workqueue
4639 * @max_active: new max_active value.
4641 * Set max_active of @wq to @max_active.
4644 * Don't call from IRQ context.
4646 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4648 struct pool_workqueue *pwq;
4650 /* disallow meddling with max_active for ordered workqueues */
4651 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4654 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4656 mutex_lock(&wq->mutex);
4658 wq->flags &= ~__WQ_ORDERED;
4659 wq->saved_max_active = max_active;
4661 for_each_pwq(pwq, wq)
4662 pwq_adjust_max_active(pwq);
4664 mutex_unlock(&wq->mutex);
4666 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
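/*
 * Illustrative sketch (hypothetical example_wq): raise the concurrency
 * limit of a workqueue at runtime, e.g. after probing how many devices
 * need servicing. The value is clamped by wq_clamp_max_active().
 *
 *	workqueue_set_max_active(example_wq, 16);
 */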
4669 * current_work - retrieve %current task's work struct
4671 * Determine if %current task is a workqueue worker and what it's working on.
4672 * Useful to find out the context that the %current task is running in.
4674 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4676 struct work_struct *current_work(void)
4678 struct worker *worker = current_wq_worker();
4680 return worker ? worker->current_work : NULL;
4682 EXPORT_SYMBOL(current_work);
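/*
 * Illustrative sketch: a helper that must not be called from its own work
 * item could use current_work() to detect that case. example_work and the
 * error choice are hypothetical.
 *
 *	if (current_work() == &example_work)
 *		return -EDEADLK;
 */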
4685 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4687 * Determine whether %current is a workqueue rescuer. Can be used from
4688 * work functions to determine whether it's being run off the rescuer task.
4690 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4692 bool current_is_workqueue_rescuer(void)
4694 struct worker *worker = current_wq_worker();
4696 return worker && worker->rescue_wq;
4700 * workqueue_congested - test whether a workqueue is congested
4701 * @cpu: CPU in question
4702 * @wq: target workqueue
4704 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4705 * no synchronization around this function and the test result is
4706 * unreliable and only useful as advisory hints or for debugging.
4708 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4709 * Note that both per-cpu and unbound workqueues may be associated with
4710 * multiple pool_workqueues which have separate congested states. A
4711 * workqueue being congested on one CPU doesn't mean the workqueue is also
4712 * congested on other CPUs / NUMA nodes.
4715 * %true if congested, %false otherwise.
4717 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4719 struct pool_workqueue *pwq;
4725 if (cpu == WORK_CPU_UNBOUND)
4726 cpu = smp_processor_id();
4728 if (!(wq->flags & WQ_UNBOUND))
4729 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4731 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4733 ret = !list_empty(&pwq->inactive_works);
4739 EXPORT_SYMBOL_GPL(workqueue_congested);
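/*
 * Illustrative sketch: a producer may use the (unsynchronized) congestion
 * hint to throttle optional work. example_wq and example_work are
 * hypothetical.
 *
 *	if (!workqueue_congested(WORK_CPU_UNBOUND, example_wq))
 *		queue_work(example_wq, &example_work);
 */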
4742 * work_busy - test whether a work is currently pending or running
4743 * @work: the work to be tested
4745 * Test whether @work is currently pending or running. There is no
4746 * synchronization around this function and the test result is
4747 * unreliable and only useful as advisory hints or for debugging.
4750 * OR'd bitmask of WORK_BUSY_* bits.
4752 unsigned int work_busy(struct work_struct *work)
4754 struct worker_pool *pool;
4755 unsigned long flags;
4756 unsigned int ret = 0;
4758 if (work_pending(work))
4759 ret |= WORK_BUSY_PENDING;
4762 pool = get_work_pool(work);
4764 raw_spin_lock_irqsave(&pool->lock, flags);
4765 if (find_worker_executing_work(pool, work))
4766 ret |= WORK_BUSY_RUNNING;
4767 raw_spin_unlock_irqrestore(&pool->lock, flags);
4773 EXPORT_SYMBOL_GPL(work_busy);
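/*
 * Illustrative sketch: the result is only an advisory snapshot, useful for
 * debug output. example_work is hypothetical.
 *
 *	unsigned int busy = work_busy(&example_work);
 *
 *	pr_debug("example_work %s%s\n",
 *		 busy & WORK_BUSY_PENDING ? "pending " : "",
 *		 busy & WORK_BUSY_RUNNING ? "running" : "");
 */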
4776 * set_worker_desc - set description for the current work item
4777 * @fmt: printf-style format string
4778 * @...: arguments for the format string
4780 * This function can be called by a running work function to describe what
4781 * the work item is about. If the worker task gets dumped, this
4782 * information will be printed out together to help debugging. The
4783 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4785 void set_worker_desc(const char *fmt, ...)
4787 struct worker *worker = current_wq_worker();
4791 va_start(args, fmt);
4792 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4796 EXPORT_SYMBOL_GPL(set_worker_desc);
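/*
 * Illustrative sketch: a work function handling many similar objects can
 * record which one it is processing so that a later task dump is more
 * informative. struct example_dev and its ->name are hypothetical.
 *
 *	static void example_workfn(struct work_struct *work)
 *	{
 *		struct example_dev *dev =
 *			container_of(work, struct example_dev, work);
 *
 *		set_worker_desc("example/%s", dev->name);
 *		...
 *	}
 */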
4799 * print_worker_info - print out worker information and description
4800 * @log_lvl: the log level to use when printing
4801 * @task: target task
4803 * If @task is a worker and currently executing a work item, print out the
4804 * name of the workqueue being serviced and worker description set with
4805 * set_worker_desc() by the currently executing work item.
4807 * This function can be safely called on any task as long as the
4808 * task_struct itself is accessible. While safe, this function isn't
4809 * synchronized and may print out mixed-up or garbage output of limited length.
4811 void print_worker_info(const char *log_lvl, struct task_struct *task)
4813 work_func_t *fn = NULL;
4814 char name[WQ_NAME_LEN] = { };
4815 char desc[WORKER_DESC_LEN] = { };
4816 struct pool_workqueue *pwq = NULL;
4817 struct workqueue_struct *wq = NULL;
4818 struct worker *worker;
4820 if (!(task->flags & PF_WQ_WORKER))
4824 * This function is called without any synchronization and @task
4825 * could be in any state. Be careful with dereferences.
4827 worker = kthread_probe_data(task);
4830 * Carefully copy the associated workqueue's workfn, name and desc.
4831 * Keep the original last '\0' in case the original is garbage.
4833 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
4834 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
4835 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
4836 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
4837 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
4839 if (fn || name[0] || desc[0]) {
4840 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
4841 if (strcmp(name, desc))
4842 pr_cont(" (%s)", desc);
4847 static void pr_cont_pool_info(struct worker_pool *pool)
4849 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4850 if (pool->node != NUMA_NO_NODE)
4851 pr_cont(" node=%d", pool->node);
4852 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4855 struct pr_cont_work_struct {
4861 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
4865 if (func == pcwsp->func) {
4869 if (pcwsp->ctr == 1)
4870 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
4872 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
4875 if ((long)func == -1L)
4877 pcwsp->comma = comma;
4882 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
4884 if (work->func == wq_barrier_func) {
4885 struct wq_barrier *barr;
4887 barr = container_of(work, struct wq_barrier, work);
4889 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
4890 pr_cont("%s BAR(%d)", comma ? "," : "",
4891 task_pid_nr(barr->task));
4894 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
4895 pr_cont_work_flush(comma, work->func, pcwsp);
4899 static void show_pwq(struct pool_workqueue *pwq)
4901 struct pr_cont_work_struct pcws = { .ctr = 0, };
4902 struct worker_pool *pool = pwq->pool;
4903 struct work_struct *work;
4904 struct worker *worker;
4905 bool has_in_flight = false, has_pending = false;
4908 pr_info(" pwq %d:", pool->id);
4909 pr_cont_pool_info(pool);
4911 pr_cont(" active=%d/%d refcnt=%d%s\n",
4912 pwq->nr_active, pwq->max_active, pwq->refcnt,
4913 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4915 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4916 if (worker->current_pwq == pwq) {
4917 has_in_flight = true;
4921 if (has_in_flight) {
4924 pr_info(" in-flight:");
4925 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4926 if (worker->current_pwq != pwq)
4929 pr_cont("%s %d%s:%ps", comma ? "," : "",
4930 task_pid_nr(worker->task),
4931 worker->rescue_wq ? "(RESCUER)" : "",
4932 worker->current_func);
4933 list_for_each_entry(work, &worker->scheduled, entry)
4934 pr_cont_work(false, work, &pcws);
4935 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
4941 list_for_each_entry(work, &pool->worklist, entry) {
4942 if (get_work_pwq(work) == pwq) {
4950 pr_info(" pending:");
4951 list_for_each_entry(work, &pool->worklist, entry) {
4952 if (get_work_pwq(work) != pwq)
4955 pr_cont_work(comma, work, &pcws);
4956 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4958 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
4962 if (!list_empty(&pwq->inactive_works)) {
4965 pr_info(" inactive:");
4966 list_for_each_entry(work, &pwq->inactive_works, entry) {
4967 pr_cont_work(comma, work, &pcws);
4968 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4970 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
4976 * show_one_workqueue - dump state of specified workqueue
4977 * @wq: workqueue whose state will be printed
4979 void show_one_workqueue(struct workqueue_struct *wq)
4981 struct pool_workqueue *pwq;
4983 unsigned long flags;
4985 for_each_pwq(pwq, wq) {
4986 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
4991 if (idle) /* Nothing to print for idle workqueue */
4994 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4996 for_each_pwq(pwq, wq) {
4997 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
4998 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
5000 * Defer printing to avoid deadlocks in console
5001 * drivers that queue work while holding locks
5002 * also taken in their write paths.
5004 printk_deferred_enter();
5006 printk_deferred_exit();
5008 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
5010 * We could be printing a lot from atomic context, e.g.
5011 * sysrq-t -> show_all_workqueues(). Avoid triggering
5014 touch_nmi_watchdog();
5020 * show_one_worker_pool - dump state of specified worker pool
5021 * @pool: worker pool whose state will be printed
5023 static void show_one_worker_pool(struct worker_pool *pool)
5025 struct worker *worker;
5027 unsigned long flags;
5028 unsigned long hung = 0;
5030 raw_spin_lock_irqsave(&pool->lock, flags);
5031 if (pool->nr_workers == pool->nr_idle)
5034 /* How long the first pending work is waiting for a worker. */
5035 if (!list_empty(&pool->worklist))
5036 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
5039 * Defer printing to avoid deadlocks in console drivers that
5040 * queue work while holding locks also taken in their write
5043 printk_deferred_enter();
5044 pr_info("pool %d:", pool->id);
5045 pr_cont_pool_info(pool);
5046 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
5048 pr_cont(" manager: %d",
5049 task_pid_nr(pool->manager->task));
5050 list_for_each_entry(worker, &pool->idle_list, entry) {
5051 pr_cont(" %s%d", first ? "idle: " : "",
5052 task_pid_nr(worker->task));
5056 printk_deferred_exit();
5058 raw_spin_unlock_irqrestore(&pool->lock, flags);
5060 * We could be printing a lot from atomic context, e.g.
5061 * sysrq-t -> show_all_workqueues(). Avoid triggering
5064 touch_nmi_watchdog();
5069 * show_all_workqueues - dump workqueue state
5071 * Called from a sysrq handler and prints out all busy workqueues and pools.
5073 void show_all_workqueues(void)
5075 struct workqueue_struct *wq;
5076 struct worker_pool *pool;
5081 pr_info("Showing busy workqueues and worker pools:\n");
5083 list_for_each_entry_rcu(wq, &workqueues, list)
5084 show_one_workqueue(wq);
5086 for_each_pool(pool, pi)
5087 show_one_worker_pool(pool);
5093 * show_freezable_workqueues - dump freezable workqueue state
5095 * Called from try_to_freeze_tasks() and prints out all freezable workqueues
5098 void show_freezable_workqueues(void)
5100 struct workqueue_struct *wq;
5104 pr_info("Showing freezable workqueues that are still busy:\n");
5106 list_for_each_entry_rcu(wq, &workqueues, list) {
5107 if (!(wq->flags & WQ_FREEZABLE))
5109 show_one_workqueue(wq);
5115 /* used to show worker information through /proc/PID/{comm,stat,status} */
5116 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
5120 /* always show the actual comm */
5121 off = strscpy(buf, task->comm, size);
5125 /* stabilize PF_WQ_WORKER and worker pool association */
5126 mutex_lock(&wq_pool_attach_mutex);
5128 if (task->flags & PF_WQ_WORKER) {
5129 struct worker *worker = kthread_data(task);
5130 struct worker_pool *pool = worker->pool;
5133 raw_spin_lock_irq(&pool->lock);
5135 * ->desc tracks information (wq name or
5136 * set_worker_desc()) for the latest execution. If
5137 * current, prepend '+', otherwise '-'.
5139 if (worker->desc[0] != '\0') {
5140 if (worker->current_work)
5141 scnprintf(buf + off, size - off, "+%s",
5144 scnprintf(buf + off, size - off, "-%s",
5147 raw_spin_unlock_irq(&pool->lock);
5151 mutex_unlock(&wq_pool_attach_mutex);
5159 * There are two challenges in supporting CPU hotplug. Firstly, there
5160 * are a lot of assumptions on strong associations among work, pwq and
5161 * pool which make migrating pending and scheduled works very
5162 * difficult to implement without impacting hot paths. Secondly,
5163 * worker pools serve a mix of short, long and very long running work
5164 * items, making blocked draining impractical.
5166 * This is solved by allowing a pool to be disassociated from its CPU and
5167 * run as an unbound pool, and by allowing it to be reattached later if the
5168 * cpu comes back online.
5171 static void unbind_workers(int cpu)
5173 struct worker_pool *pool;
5174 struct worker *worker;
5176 for_each_cpu_worker_pool(pool, cpu) {
5177 mutex_lock(&wq_pool_attach_mutex);
5178 raw_spin_lock_irq(&pool->lock);
5181 * We've blocked all attach/detach operations. Make all workers
5182 * unbound and set DISASSOCIATED. Before this, all workers
5183 * must be on the cpu. After this, they may become diasporas.
5184	 * And the preemption-disabled sections in their sched callbacks
5185	 * are guaranteed to see WORKER_UNBOUND since the code here
5186	 * runs on the same cpu.
5188 for_each_pool_worker(worker, pool)
5189 worker->flags |= WORKER_UNBOUND;
5191 pool->flags |= POOL_DISASSOCIATED;
5194	 * The handling of nr_running in sched callbacks is disabled
5195 * now. Zap nr_running. After this, nr_running stays zero and
5196 * need_more_worker() and keep_working() are always true as
5197 * long as the worklist is not empty. This pool now behaves as
5198	 * an unbound (in terms of concurrency management) pool which
5199	 * is served by workers tied to the pool.
5201 pool->nr_running = 0;
5204 * With concurrency management just turned off, a busy
5205 * worker blocking could lead to lengthy stalls. Kick off
5206 * unbound chain execution of currently pending work items.
5208 wake_up_worker(pool);
5210 raw_spin_unlock_irq(&pool->lock);
5212 for_each_pool_worker(worker, pool)
5213 unbind_worker(worker);
5215 mutex_unlock(&wq_pool_attach_mutex);
5220 * rebind_workers - rebind all workers of a pool to the associated CPU
5221 * @pool: pool of interest
5223 * @pool->cpu is coming online. Rebind all workers to the CPU.
5225 static void rebind_workers(struct worker_pool *pool)
5227 struct worker *worker;
5229 lockdep_assert_held(&wq_pool_attach_mutex);
5232 * Restore CPU affinity of all workers. As all idle workers should
5233 * be on the run-queue of the associated CPU before any local
5234 * wake-ups for concurrency management happen, restore CPU affinity
5235 * of all workers first and then clear UNBOUND. As we're called
5236 * from CPU_ONLINE, the following shouldn't fail.
5238 for_each_pool_worker(worker, pool) {
5239 kthread_set_per_cpu(worker->task, pool->cpu);
5240 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
5241 pool->attrs->cpumask) < 0);
5244 raw_spin_lock_irq(&pool->lock);
5246 pool->flags &= ~POOL_DISASSOCIATED;
5248 for_each_pool_worker(worker, pool) {
5249 unsigned int worker_flags = worker->flags;
5252 * We want to clear UNBOUND but can't directly call
5253 * worker_clr_flags() or adjust nr_running. Atomically
5254 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5255 * @worker will clear REBOUND using worker_clr_flags() when
5256 * it initiates the next execution cycle thus restoring
5257 * concurrency management. Note that when or whether
5258 * @worker clears REBOUND doesn't affect correctness.
5260 * WRITE_ONCE() is necessary because @worker->flags may be
5261 * tested without holding any lock in
5262 * wq_worker_running(). Without it, NOT_RUNNING test may
5263 * fail incorrectly leading to premature concurrency
5264 * management operations.
5266 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5267 worker_flags |= WORKER_REBOUND;
5268 worker_flags &= ~WORKER_UNBOUND;
5269 WRITE_ONCE(worker->flags, worker_flags);
5272 raw_spin_unlock_irq(&pool->lock);
5276 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5277 * @pool: unbound pool of interest
5278 * @cpu: the CPU which is coming up
5280 * An unbound pool may end up with a cpumask which doesn't have any online
5281 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
5282 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
5283 * online CPU before, cpus_allowed of all its workers should be restored.
5285 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5287 static cpumask_t cpumask;
5288 struct worker *worker;
5290 lockdep_assert_held(&wq_pool_attach_mutex);
5292 /* is @cpu allowed for @pool? */
5293 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5296 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
5298 /* as we're called from CPU_ONLINE, the following shouldn't fail */
5299 for_each_pool_worker(worker, pool)
5300 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
5303 int workqueue_prepare_cpu(unsigned int cpu)
5305 struct worker_pool *pool;
5307 for_each_cpu_worker_pool(pool, cpu) {
5308 if (pool->nr_workers)
5310 if (!create_worker(pool))
5316 int workqueue_online_cpu(unsigned int cpu)
5318 struct worker_pool *pool;
5319 struct workqueue_struct *wq;
5322 mutex_lock(&wq_pool_mutex);
5324 for_each_pool(pool, pi) {
5325 mutex_lock(&wq_pool_attach_mutex);
5327 if (pool->cpu == cpu)
5328 rebind_workers(pool);
5329 else if (pool->cpu < 0)
5330 restore_unbound_workers_cpumask(pool, cpu);
5332 mutex_unlock(&wq_pool_attach_mutex);
5335 /* update NUMA affinity of unbound workqueues */
5336 list_for_each_entry(wq, &workqueues, list)
5337 wq_update_unbound_numa(wq, cpu, true);
5339 mutex_unlock(&wq_pool_mutex);
5343 int workqueue_offline_cpu(unsigned int cpu)
5345 struct workqueue_struct *wq;
5347 /* unbinding per-cpu workers should happen on the local CPU */
5348 if (WARN_ON(cpu != smp_processor_id()))
5351 unbind_workers(cpu);
5353 /* update NUMA affinity of unbound workqueues */
5354 mutex_lock(&wq_pool_mutex);
5355 list_for_each_entry(wq, &workqueues, list)
5356 wq_update_unbound_numa(wq, cpu, false);
5357 mutex_unlock(&wq_pool_mutex);
5362 struct work_for_cpu {
5363 struct work_struct work;
5369 static void work_for_cpu_fn(struct work_struct *work)
5371 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5373 wfc->ret = wfc->fn(wfc->arg);
5377 * work_on_cpu - run a function in thread context on a particular cpu
5378 * @cpu: the cpu to run on
5379 * @fn: the function to run
5380 * @arg: the function arg
5382 * It is up to the caller to ensure that the cpu doesn't go offline.
5383 * The caller must not hold any locks which would prevent @fn from completing.
5385 * Return: The value @fn returns.
5387 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
5389 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5391 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5392 schedule_work_on(cpu, &wfc.work);
5393 flush_work(&wfc.work);
5394 destroy_work_on_stack(&wfc.work);
5397 EXPORT_SYMBOL_GPL(work_on_cpu);
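/*
 * Illustrative sketch: run a hypothetical per-CPU query function on CPU 3
 * and collect its return value. example_query() is not a real function.
 *
 *	long ret = work_on_cpu(3, example_query, NULL);
 *
 * The caller must keep CPU 3 online for the duration, e.g. by calling this
 * under cpus_read_lock() or by using work_on_cpu_safe() below.
 */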
5400 * work_on_cpu_safe - run a function in thread context on a particular cpu
5401 * @cpu: the cpu to run on
5402 * @fn: the function to run
5403 * @arg: the function argument
5405 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5406 * any locks which would prevent @fn from completing.
5408 * Return: The value @fn returns.
5410 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
5415 if (cpu_online(cpu))
5416 ret = work_on_cpu(cpu, fn, arg);
5420 EXPORT_SYMBOL_GPL(work_on_cpu_safe);
5421 #endif /* CONFIG_SMP */
5423 #ifdef CONFIG_FREEZER
5426 * freeze_workqueues_begin - begin freezing workqueues
5428 * Start freezing workqueues. After this function returns, all freezable
5429 * workqueues will queue new work items to their inactive_works list instead of
5433 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5435 void freeze_workqueues_begin(void)
5437 struct workqueue_struct *wq;
5438 struct pool_workqueue *pwq;
5440 mutex_lock(&wq_pool_mutex);
5442 WARN_ON_ONCE(workqueue_freezing);
5443 workqueue_freezing = true;
5445 list_for_each_entry(wq, &workqueues, list) {
5446 mutex_lock(&wq->mutex);
5447 for_each_pwq(pwq, wq)
5448 pwq_adjust_max_active(pwq);
5449 mutex_unlock(&wq->mutex);
5452 mutex_unlock(&wq_pool_mutex);
5456 * freeze_workqueues_busy - are freezable workqueues still busy?
5458 * Check whether freezing is complete. This function must be called
5459 * between freeze_workqueues_begin() and thaw_workqueues().
5462 * Grabs and releases wq_pool_mutex.
5465 * %true if some freezable workqueues are still busy. %false if freezing
5468 bool freeze_workqueues_busy(void)
5471 struct workqueue_struct *wq;
5472 struct pool_workqueue *pwq;
5474 mutex_lock(&wq_pool_mutex);
5476 WARN_ON_ONCE(!workqueue_freezing);
5478 list_for_each_entry(wq, &workqueues, list) {
5479 if (!(wq->flags & WQ_FREEZABLE))
5482 * nr_active is monotonically decreasing. It's safe
5483 * to peek without lock.
5486 for_each_pwq(pwq, wq) {
5487 WARN_ON_ONCE(pwq->nr_active < 0);
5488 if (pwq->nr_active) {
5497 mutex_unlock(&wq_pool_mutex);
5502 * thaw_workqueues - thaw workqueues
5504 * Thaw workqueues. Normal queueing is restored and all collected
5505 * frozen works are transferred to their respective pool worklists.
5508 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5510 void thaw_workqueues(void)
5512 struct workqueue_struct *wq;
5513 struct pool_workqueue *pwq;
5515 mutex_lock(&wq_pool_mutex);
5517 if (!workqueue_freezing)
5520 workqueue_freezing = false;
5522 /* restore max_active and repopulate worklist */
5523 list_for_each_entry(wq, &workqueues, list) {
5524 mutex_lock(&wq->mutex);
5525 for_each_pwq(pwq, wq)
5526 pwq_adjust_max_active(pwq);
5527 mutex_unlock(&wq->mutex);
5531 mutex_unlock(&wq_pool_mutex);
5533 #endif /* CONFIG_FREEZER */
5535 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
5539 struct workqueue_struct *wq;
5540 struct apply_wqattrs_ctx *ctx, *n;
5542 lockdep_assert_held(&wq_pool_mutex);
5544 list_for_each_entry(wq, &workqueues, list) {
5545 if (!(wq->flags & WQ_UNBOUND))
5547 /* creating multiple pwqs breaks ordering guarantee */
5548 if (wq->flags & __WQ_ORDERED)
5551 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
5557 list_add_tail(&ctx->list, &ctxs);
5560 list_for_each_entry_safe(ctx, n, &ctxs, list) {
5562 apply_wqattrs_commit(ctx);
5563 apply_wqattrs_cleanup(ctx);
5567 mutex_lock(&wq_pool_attach_mutex);
5568 cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
5569 mutex_unlock(&wq_pool_attach_mutex);
5575 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5576 * @cpumask: the cpumask to set
5578 * The low-level workqueues cpumask is a global cpumask that limits
5579 * the affinity of all unbound workqueues. This function checks @cpumask
5580 * and applies it to all unbound workqueues and updates all their pwqs.
5582 * Return: 0 - Success
5583 * -EINVAL - Invalid @cpumask
5584 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
5586 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5591 * Not excluding isolated cpus on purpose.
5592 * If the user wishes to include them, we allow that.
5594 cpumask_and(cpumask, cpumask, cpu_possible_mask);
5595 if (!cpumask_empty(cpumask)) {
5596 apply_wqattrs_lock();
5597 if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
5602 ret = workqueue_apply_unbound_cpumask(cpumask);
5605 apply_wqattrs_unlock();
5613 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5614 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
5615 * following attributes.
5617 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
5618 * max_active RW int : maximum number of in-flight work items
5620 * Unbound workqueues have the following extra attributes.
5622 * pool_ids RO int : the associated pool IDs for each node
5623 * nice RW int : nice value of the workers
5624 * cpumask RW mask : bitmask of allowed CPUs for the workers
5624 *  numa		RW bool	: whether to enable NUMA affinity
5628 struct workqueue_struct *wq;
5632 static struct workqueue_struct *dev_to_wq(struct device *dev)
5634 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5639 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5642 struct workqueue_struct *wq = dev_to_wq(dev);
5644 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5646 static DEVICE_ATTR_RO(per_cpu);
5648 static ssize_t max_active_show(struct device *dev,
5649 struct device_attribute *attr, char *buf)
5651 struct workqueue_struct *wq = dev_to_wq(dev);
5653 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5656 static ssize_t max_active_store(struct device *dev,
5657 struct device_attribute *attr, const char *buf,
5660 struct workqueue_struct *wq = dev_to_wq(dev);
5663 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5666 workqueue_set_max_active(wq, val);
5669 static DEVICE_ATTR_RW(max_active);
5671 static struct attribute *wq_sysfs_attrs[] = {
5672 &dev_attr_per_cpu.attr,
5673 &dev_attr_max_active.attr,
5676 ATTRIBUTE_GROUPS(wq_sysfs);
5678 static ssize_t wq_pool_ids_show(struct device *dev,
5679 struct device_attribute *attr, char *buf)
5681 struct workqueue_struct *wq = dev_to_wq(dev);
5682 const char *delim = "";
5683 int node, written = 0;
5687 for_each_node(node) {
5688 written += scnprintf(buf + written, PAGE_SIZE - written,
5689 "%s%d:%d", delim, node,
5690 unbound_pwq_by_node(wq, node)->pool->id);
5693 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5700 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5703 struct workqueue_struct *wq = dev_to_wq(dev);
5706 mutex_lock(&wq->mutex);
5707 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5708 mutex_unlock(&wq->mutex);
5713 /* prepare workqueue_attrs for sysfs store operations */
5714 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5716 struct workqueue_attrs *attrs;
5718 lockdep_assert_held(&wq_pool_mutex);
5720 attrs = alloc_workqueue_attrs();
5724 copy_workqueue_attrs(attrs, wq->unbound_attrs);
5728 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5729 const char *buf, size_t count)
5731 struct workqueue_struct *wq = dev_to_wq(dev);
5732 struct workqueue_attrs *attrs;
5735 apply_wqattrs_lock();
5737 attrs = wq_sysfs_prep_attrs(wq);
5741 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5742 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5743 ret = apply_workqueue_attrs_locked(wq, attrs);
5748 apply_wqattrs_unlock();
5749 free_workqueue_attrs(attrs);
5750 return ret ?: count;
5753 static ssize_t wq_cpumask_show(struct device *dev,
5754 struct device_attribute *attr, char *buf)
5756 struct workqueue_struct *wq = dev_to_wq(dev);
5759 mutex_lock(&wq->mutex);
5760 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5761 cpumask_pr_args(wq->unbound_attrs->cpumask));
5762 mutex_unlock(&wq->mutex);
5766 static ssize_t wq_cpumask_store(struct device *dev,
5767 struct device_attribute *attr,
5768 const char *buf, size_t count)
5770 struct workqueue_struct *wq = dev_to_wq(dev);
5771 struct workqueue_attrs *attrs;
5774 apply_wqattrs_lock();
5776 attrs = wq_sysfs_prep_attrs(wq);
5780 ret = cpumask_parse(buf, attrs->cpumask);
5782 ret = apply_workqueue_attrs_locked(wq, attrs);
5785 apply_wqattrs_unlock();
5786 free_workqueue_attrs(attrs);
5787 return ret ?: count;
5790 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5793 struct workqueue_struct *wq = dev_to_wq(dev);
5796 mutex_lock(&wq->mutex);
5797 written = scnprintf(buf, PAGE_SIZE, "%d\n",
5798 !wq->unbound_attrs->no_numa);
5799 mutex_unlock(&wq->mutex);
5804 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5805 const char *buf, size_t count)
5807 struct workqueue_struct *wq = dev_to_wq(dev);
5808 struct workqueue_attrs *attrs;
5809 int v, ret = -ENOMEM;
5811 apply_wqattrs_lock();
5813 attrs = wq_sysfs_prep_attrs(wq);
5818 if (sscanf(buf, "%d", &v) == 1) {
5819 attrs->no_numa = !v;
5820 ret = apply_workqueue_attrs_locked(wq, attrs);
5824 apply_wqattrs_unlock();
5825 free_workqueue_attrs(attrs);
5826 return ret ?: count;
5829 static struct device_attribute wq_sysfs_unbound_attrs[] = {
5830 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5831 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5832 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5833 __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5837 static struct bus_type wq_subsys = {
5838 .name = "workqueue",
5839 .dev_groups = wq_sysfs_groups,
5842 static ssize_t wq_unbound_cpumask_show(struct device *dev,
5843 struct device_attribute *attr, char *buf)
5847 mutex_lock(&wq_pool_mutex);
5848 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5849 cpumask_pr_args(wq_unbound_cpumask));
5850 mutex_unlock(&wq_pool_mutex);
5855 static ssize_t wq_unbound_cpumask_store(struct device *dev,
5856 struct device_attribute *attr, const char *buf, size_t count)
5858 cpumask_var_t cpumask;
5861 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5864 ret = cpumask_parse(buf, cpumask);
5866 ret = workqueue_set_unbound_cpumask(cpumask);
5868 free_cpumask_var(cpumask);
5869 return ret ? ret : count;
5872 static struct device_attribute wq_sysfs_cpumask_attr =
5873 __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5874 wq_unbound_cpumask_store);
5876 static int __init wq_sysfs_init(void)
5878 struct device *dev_root;
5881 err = subsys_virtual_register(&wq_subsys, NULL);
5885 dev_root = bus_get_dev_root(&wq_subsys);
5887 err = device_create_file(dev_root, &wq_sysfs_cpumask_attr);
5888 put_device(dev_root);
5892 core_initcall(wq_sysfs_init);
5894 static void wq_device_release(struct device *dev)
5896 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5902 * workqueue_sysfs_register - make a workqueue visible in sysfs
5903 * @wq: the workqueue to register
5905 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5906 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5907 * which is the preferred method.
5909 * A workqueue user should use this function directly iff it wants to apply
5910 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5911 * apply_workqueue_attrs() may race against userland updating the
5914 * Return: 0 on success, -errno on failure.
5916 int workqueue_sysfs_register(struct workqueue_struct *wq)
5918 struct wq_device *wq_dev;
5922 * Adjusting max_active or creating new pwqs by applying
5923 * attributes breaks ordering guarantee. Disallow exposing ordered
5926 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5929 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5934 wq_dev->dev.bus = &wq_subsys;
5935 wq_dev->dev.release = wq_device_release;
5936 dev_set_name(&wq_dev->dev, "%s", wq->name);
5939 * unbound_attrs are created separately. Suppress uevent until
5940 * everything is ready.
5942 dev_set_uevent_suppress(&wq_dev->dev, true);
5944 ret = device_register(&wq_dev->dev);
5946 put_device(&wq_dev->dev);
5951 if (wq->flags & WQ_UNBOUND) {
5952 struct device_attribute *attr;
5954 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5955 ret = device_create_file(&wq_dev->dev, attr);
5957 device_unregister(&wq_dev->dev);
5964 dev_set_uevent_suppress(&wq_dev->dev, false);
5965 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5970 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5971 * @wq: the workqueue to unregister
5973 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5975 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5977 struct wq_device *wq_dev = wq->wq_dev;
5983 device_unregister(&wq_dev->dev);
5985 #else /* CONFIG_SYSFS */
5986 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
5987 #endif /* CONFIG_SYSFS */
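/*
 * Illustrative sketch: passing WQ_SYSFS at allocation time (or calling
 * workqueue_sysfs_register() explicitly) exposes a workqueue under
 * /sys/bus/workqueue/devices so its attributes can be tuned from userland.
 * The name "example_sysfs" is hypothetical.
 *
 *	wq = alloc_workqueue("example_sysfs", WQ_UNBOUND | WQ_SYSFS, 0);
 */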
5990 * Workqueue watchdog.
5992 * Stalls may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
5993 * flush dependency, a concurrency managed work item which stays RUNNING
5994 * indefinitely. Workqueue stalls can be very difficult to debug as the
5995 * usual warning mechanisms don't trigger and internal workqueue state is
5998 * Workqueue watchdog monitors all worker pools periodically and dumps
5999 * state if some pools failed to make forward progress for a while where
6000 * forward progress is defined as the first item on ->worklist changing.
6002 * This mechanism is controlled through the kernel parameter
6003 * "workqueue.watchdog_thresh" which can be updated at runtime through the
6004 * corresponding sysfs parameter file.
6006 #ifdef CONFIG_WQ_WATCHDOG
6008 static unsigned long wq_watchdog_thresh = 30;
6009 static struct timer_list wq_watchdog_timer;
6011 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
6012 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
6015 * Show workers that might prevent the processing of pending work items.
6016 * The only candidates are CPU-bound workers in the running state.
6017 * Pending work items should be handled by another idle worker
6018 * in all other situations.
6020 static void show_cpu_pool_hog(struct worker_pool *pool)
6022 struct worker *worker;
6023 unsigned long flags;
6026 raw_spin_lock_irqsave(&pool->lock, flags);
6028 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6029 if (task_is_running(worker->task)) {
6031 * Defer printing to avoid deadlocks in console
6032 * drivers that queue work while holding locks
6033 * also taken in their write paths.
6035 printk_deferred_enter();
6037 pr_info("pool %d:\n", pool->id);
6038 sched_show_task(worker->task);
6040 printk_deferred_exit();
6044 raw_spin_unlock_irqrestore(&pool->lock, flags);
6047 static void show_cpu_pools_hogs(void)
6049 struct worker_pool *pool;
6052 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
6056 for_each_pool(pool, pi) {
6057 if (pool->cpu_stall)
6058 show_cpu_pool_hog(pool);
6065 static void wq_watchdog_reset_touched(void)
6069 wq_watchdog_touched = jiffies;
6070 for_each_possible_cpu(cpu)
6071 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
6074 static void wq_watchdog_timer_fn(struct timer_list *unused)
6076 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
6077 bool lockup_detected = false;
6078 bool cpu_pool_stall = false;
6079 unsigned long now = jiffies;
6080 struct worker_pool *pool;
6088 for_each_pool(pool, pi) {
6089 unsigned long pool_ts, touched, ts;
6091 pool->cpu_stall = false;
6092 if (list_empty(&pool->worklist))
6096 * If a virtual machine is stopped by the host it can look to
6097 * the watchdog like a stall.
6099 kvm_check_and_clear_guest_paused();
6101 /* get the latest of pool and touched timestamps */
6103 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
6105 touched = READ_ONCE(wq_watchdog_touched);
6106 pool_ts = READ_ONCE(pool->watchdog_ts);
6108 if (time_after(pool_ts, touched))
6114 if (time_after(now, ts + thresh)) {
6115 lockup_detected = true;
6116 if (pool->cpu >= 0) {
6117 pool->cpu_stall = true;
6118 cpu_pool_stall = true;
6120 pr_emerg("BUG: workqueue lockup - pool");
6121 pr_cont_pool_info(pool);
6122 pr_cont(" stuck for %us!\n",
6123 jiffies_to_msecs(now - pool_ts) / 1000);
6131 if (lockup_detected)
6132 show_all_workqueues();
6135 show_cpu_pools_hogs();
6137 wq_watchdog_reset_touched();
6138 mod_timer(&wq_watchdog_timer, jiffies + thresh);
6141 notrace void wq_watchdog_touch(int cpu)
6144 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
6146 wq_watchdog_touched = jiffies;
6149 static void wq_watchdog_set_thresh(unsigned long thresh)
6151 wq_watchdog_thresh = 0;
6152 del_timer_sync(&wq_watchdog_timer);
6155 wq_watchdog_thresh = thresh;
6156 wq_watchdog_reset_touched();
6157 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
6161 static int wq_watchdog_param_set_thresh(const char *val,
6162 const struct kernel_param *kp)
6164 unsigned long thresh;
6167 ret = kstrtoul(val, 0, &thresh);
6172 wq_watchdog_set_thresh(thresh);
6174 wq_watchdog_thresh = thresh;
6179 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
6180 .set = wq_watchdog_param_set_thresh,
6181 .get = param_get_ulong,
6184 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
6187 static void wq_watchdog_init(void)
6189 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
6190 wq_watchdog_set_thresh(wq_watchdog_thresh);
6193 #else /* CONFIG_WQ_WATCHDOG */
6195 static inline void wq_watchdog_init(void) { }
6197 #endif /* CONFIG_WQ_WATCHDOG */
6199 static void __init wq_numa_init(void)
6204 if (num_possible_nodes() <= 1)
6207 if (wq_disable_numa) {
6208 pr_info("workqueue: NUMA affinity support disabled\n");
6212 for_each_possible_cpu(cpu) {
6213 if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
6214 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
6219 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
6220 BUG_ON(!wq_update_unbound_numa_attrs_buf);
6223	 * We want masks of possible CPUs of each node, which aren't readily
6224	 * available. Build them from cpu_to_node(), which should have been
6225	 * fully initialized by now.
6227 tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
6231 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
6232 node_online(node) ? node : NUMA_NO_NODE));
6234 for_each_possible_cpu(cpu) {
6235 node = cpu_to_node(cpu);
6236 cpumask_set_cpu(cpu, tbl[node]);
6239 wq_numa_possible_cpumask = tbl;
6240 wq_numa_enabled = true;
6244 * workqueue_init_early - early init for workqueue subsystem
6246 * This is the first half of two-staged workqueue subsystem initialization
6247 * and invoked as soon as the bare basics - memory allocation, cpumasks and
6248 * idr are up. It sets up all the data structures and system workqueues
6249 * and allows early boot code to create workqueues and queue/cancel work
6250 * items. Actual work item execution starts only after kthreads can be
6251 * created and scheduled right before early initcalls.
6253 void __init workqueue_init_early(void)
6255 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
6258 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
6260 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
6261 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
6262 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
6264 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
6266 /* initialize CPU pools */
6267 for_each_possible_cpu(cpu) {
6268 struct worker_pool *pool;
6271 for_each_cpu_worker_pool(pool, cpu) {
6272 BUG_ON(init_worker_pool(pool));
6274 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
6275 pool->attrs->nice = std_nice[i++];
6276 pool->node = cpu_to_node(cpu);
6279 mutex_lock(&wq_pool_mutex);
6280 BUG_ON(worker_pool_assign_id(pool));
6281 mutex_unlock(&wq_pool_mutex);
6285 /* create default unbound and ordered wq attrs */
6286 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
6287 struct workqueue_attrs *attrs;
6289 BUG_ON(!(attrs = alloc_workqueue_attrs()));
6290 attrs->nice = std_nice[i];
6291 unbound_std_wq_attrs[i] = attrs;
6294 * An ordered wq should have only one pwq as ordering is
6295 * guaranteed by max_active which is enforced by pwqs.
6296 * Turn off NUMA so that dfl_pwq is used for all nodes.
6298 BUG_ON(!(attrs = alloc_workqueue_attrs()));
6299 attrs->nice = std_nice[i];
6300 attrs->no_numa = true;
6301 ordered_wq_attrs[i] = attrs;
6304 system_wq = alloc_workqueue("events", 0, 0);
6305 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
6306 system_long_wq = alloc_workqueue("events_long", 0, 0);
6307 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
6308 WQ_UNBOUND_MAX_ACTIVE);
6309 system_freezable_wq = alloc_workqueue("events_freezable",
6311 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
6312 WQ_POWER_EFFICIENT, 0);
6313 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
6314 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
6316 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
6317 !system_unbound_wq || !system_freezable_wq ||
6318 !system_power_efficient_wq ||
6319 !system_freezable_power_efficient_wq);
6323 * workqueue_init - bring workqueue subsystem fully online
6325 * This is the latter half of two-staged workqueue subsystem initialization
6326 * and invoked as soon as kthreads can be created and scheduled.
6327 * Workqueues have been created and work items queued on them, but there
6328 * are no kworkers executing the work items yet. Populate the worker pools
6329 * with the initial workers and enable future kworker creations.
6331 void __init workqueue_init(void)
6333 struct workqueue_struct *wq;
6334 struct worker_pool *pool;
6338 * It'd be simpler to initialize NUMA in workqueue_init_early() but
6339 * CPU to node mapping may not be available that early on some
6340 * archs such as power and arm64. As per-cpu pools created
6341 * previously could be missing node hint and unbound pools NUMA
6342 * affinity, fix them up.
6344 * Also, while iterating workqueues, create rescuers if requested.
6348 mutex_lock(&wq_pool_mutex);
6350 for_each_possible_cpu(cpu) {
6351 for_each_cpu_worker_pool(pool, cpu) {
6352 pool->node = cpu_to_node(cpu);
6356 list_for_each_entry(wq, &workqueues, list) {
6357 wq_update_unbound_numa(wq, smp_processor_id(), true);
6358 WARN(init_rescuer(wq),
6359 "workqueue: failed to create early rescuer for %s",
6363 mutex_unlock(&wq_pool_mutex);
6365 /* create the initial workers */
6366 for_each_online_cpu(cpu) {
6367 for_each_cpu_worker_pool(pool, cpu) {
6368 pool->flags &= ~POOL_DISASSOCIATED;
6369 BUG_ON(!create_worker(pool));
6373 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6374 BUG_ON(!create_worker(pool));
6381 * Despite the naming, this is a no-op function which is here only for avoiding
6382 * link error. Since compile-time warning may fail to catch, we will need to
6383 * emit run-time warning from __flush_workqueue().
6385 void __warn_flushing_systemwide_wq(void) { }
6386 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);