/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <asm/system.h>
#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */
static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);
/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
					     * very slow work */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull; /* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */
/*
 * The queues of work items and the lock governing access to them. These are
 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);
/*
 * The thread controls. A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);
/*
 * The number of users of the thread pool and its lock. Whilst this is zero we
 * have no threads hanging around; when it drops back to zero, we wait for all
 * active or queued work items to complete and kill all the threads we do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);
/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of threads in the pool. This means we always have at least
 * one thread that can process very slow work items, and we always have at
 * least one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
	unsigned vsmax;

	vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
	vsmax /= 100;
	vsmax = max(vsmax, 1U);
	return min(vsmax, slow_work_max_threads - 1);
}
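
/*
 * Worked example, not part of the original file: with the default
 * vslow_work_proportion of 50% and four threads in the pool,
 * vsmax = 4 * 50 / 100 = 2, so at most two threads may be occupied by very
 * slow items while the rest stay free for ordinary slow work (the result is
 * clamped to slow_work_max_threads - 1).  With a single thread in the pool,
 * 1 * 50 / 100 = 0 is rounded up to 1.
 */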
/*
 * Attempt to execute stuff queued on a slow thread. Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(void)
{
	struct slow_work *work = NULL;
	unsigned vsmax;
	bool very_slow;

	vsmax = slow_work_calc_vsmax();
	/* see if we can schedule a new thread to be started if we're not
	 * keeping up with the work */
	if (!waitqueue_active(&slow_work_thread_wq) &&
	    (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
	    atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
	    !slow_work_may_not_start_new_thread)
		slow_work_enqueue(&slow_work_new_thread);
	/* find something to execute */
	spin_lock_irq(&slow_work_queue_lock);
	if (!list_empty(&vslow_work_queue) &&
	    atomic_read(&vslow_work_executing_count) < vsmax) {
		work = list_entry(vslow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		atomic_inc(&vslow_work_executing_count);
		very_slow = true;
	} else if (!list_empty(&slow_work_queue)) {
		work = list_entry(slow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		very_slow = false;
	} else {
		very_slow = false; /* avoid the compiler warning */
	}
	spin_unlock_irq(&slow_work_queue_lock);

	if (!work)
		return false;
	if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
		BUG();

	work->ops->execute(work);

	if (very_slow)
		atomic_dec(&vslow_work_executing_count);
	clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
	/* if someone tried to enqueue the item whilst we were executing it,
	 * then it'll be left unenqueued to avoid multiple threads trying to
	 * execute it simultaneously
	 *
	 * there is, however, a race between us testing the pending flag and
	 * getting the spinlock, and between the enqueuer setting the pending
	 * flag and getting the spinlock, so we use a deferral bit to tell us
	 * if the enqueuer got there first
	 */
	if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irq(&slow_work_queue_lock);

		if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
		    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
			goto auto_requeue;

		spin_unlock_irq(&slow_work_queue_lock);
	}
	work->ops->put_ref(work);
	return true;

auto_requeue:
	/* we must complete the enqueue operation
	 * - we transfer our ref on the item back to the appropriate queue
	 * - don't wake another thread up as we're awake already
	 */
	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
		list_add_tail(&work->link, &vslow_work_queue);
	else
		list_add_tail(&work->link, &slow_work_queue);
	spin_unlock_irq(&slow_work_queue_lock);
	return true;
}
/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing. If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations. The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention. The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute. This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not.
 */
int slow_work_enqueue(struct slow_work *work)
{
	unsigned long flags;

	BUG_ON(slow_work_user_count <= 0);
	BUG_ON(!work->ops->get_ref);
	/* when honouring an enqueue request, we only promise that we will run
	 * the work function in the future; we do not promise to run it once
	 * per enqueue request
	 *
	 * we use the PENDING bit to merge together repeat requests without
	 * having to disable IRQs and take the spinlock, whilst still
	 * maintaining our promise
	 */
	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irqsave(&slow_work_queue_lock, flags);
		/* we promise that we will not attempt to execute the work
		 * function in more than one thread simultaneously
		 *
		 * this, however, leaves us with a problem if we're asked to
		 * enqueue the work whilst someone is executing the work
		 * function as simply queueing the work immediately means that
		 * another thread may try executing it whilst it is already
		 * under execution
		 *
		 * to deal with this, we set the ENQ_DEFERRED bit instead of
		 * enqueueing, and the thread currently executing the work
		 * function will enqueue the work item when the work function
		 * returns and it has cleared the EXECUTING bit
		 */
		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
		} else {
			if (work->ops->get_ref(work) < 0)
				goto cant_get_ref;
			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
				list_add_tail(&work->link, &vslow_work_queue);
			else
				list_add_tail(&work->link, &slow_work_queue);
			wake_up(&slow_work_thread_wq);
		}

		spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	}
	return 0;

cant_get_ref:
	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(slow_work_enqueue);
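
/*
 * Illustrative usage sketch, not part of the original file; the my_item_*
 * names below are hypothetical.  A user declares or embeds a struct
 * slow_work, supplies get_ref/put_ref/execute operations, initialises the
 * item with slow_work_init() and queues it with slow_work_enqueue() (after
 * having called slow_work_register_user(); see the sketch further below).
 * For an object with static lifetime the ref operations can be no-ops, as
 * with slow_work_new_thread_ops in this file; a dynamically allocated
 * object would take and drop a real reference instead.
 *
 *	static int my_item_get_ref(struct slow_work *work)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_item_put_ref(struct slow_work *work)
 *	{
 *	}
 *
 *	static void my_item_execute(struct slow_work *work)
 *	{
 *		printk(KERN_DEBUG "doing the slow part; sleeping is allowed\n");
 *	}
 *
 *	static const struct slow_work_ops my_item_ops = {
 *		.get_ref	= my_item_get_ref,
 *		.put_ref	= my_item_put_ref,
 *		.execute	= my_item_execute,
 *	};
 *
 *	static struct slow_work my_item;
 *
 *	slow_work_init(&my_item, &my_item_ops);
 *	slow_work_enqueue(&my_item);
 */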
/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
	unsigned long flags;
	bool do_cull = false;

	spin_lock_irqsave(&slow_work_queue_lock, flags);

	if (slow_work_cull) {
		slow_work_cull = false;

		if (list_empty(&slow_work_queue) &&
		    list_empty(&vslow_work_queue) &&
		    atomic_read(&slow_work_thread_count) >
		    slow_work_min_threads) {
			mod_timer(&slow_work_cull_timer,
				  jiffies + SLOW_WORK_CULL_TIMEOUT);
			do_cull = true;
		}
	}

	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return do_cull;
}
/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
	return !list_empty(&slow_work_queue) ||
		(!list_empty(&vslow_work_queue) &&
		 atomic_read(&vslow_work_executing_count) < vsmax);
}
/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
	int vsmax;
	DEFINE_WAIT(wait);

	set_freezable();
	set_user_nice(current, -5);

	for (;;) {
		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		prepare_to_wait(&slow_work_thread_wq, &wait,
				TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !slow_work_threads_should_exit &&
		    !slow_work_available(vsmax) &&
		    !slow_work_cull)
			schedule();
		finish_wait(&slow_work_thread_wq, &wait);
		try_to_freeze();

		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		if (slow_work_available(vsmax) && slow_work_execute()) {
			if (list_empty(&slow_work_queue) &&
			    list_empty(&vslow_work_queue) &&
			    atomic_read(&slow_work_thread_count) >
			    slow_work_min_threads)
				mod_timer(&slow_work_cull_timer,
					  jiffies + SLOW_WORK_CULL_TIMEOUT);
			continue;
		}

		if (slow_work_threads_should_exit)
			break;
		if (slow_work_cull && slow_work_cull_thread())
			break;
	}

	if (atomic_dec_and_test(&slow_work_thread_count))
		complete_and_exit(&slow_work_last_thread_exited, 0);
	return 0;
}
/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
	slow_work_cull = true;
	wake_up(&slow_work_thread_wq);
}
/*
 * Get a reference on slow work thread starter
 */
static int slow_work_new_thread_get_ref(struct slow_work *work)
{
	return 0;
}

/*
 * Drop a reference on slow work thread starter
 */
static void slow_work_new_thread_put_ref(struct slow_work *work)
{
}

/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
	struct task_struct *p;

	if (slow_work_threads_should_exit)
		return;

	if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
		return;

	if (!mutex_trylock(&slow_work_user_lock))
		return;

	slow_work_may_not_start_new_thread = true;
	atomic_inc(&slow_work_thread_count);
	p = kthread_run(slow_work_thread, NULL, "kslowd");
	if (IS_ERR(p)) {
		printk(KERN_DEBUG "Slow work thread pool: OOM\n");
		if (atomic_dec_and_test(&slow_work_thread_count))
			BUG(); /* we're running on a slow work thread... */
		mod_timer(&slow_work_oom_timer,
			  jiffies + SLOW_WORK_OOM_TIMEOUT);
	} else {
		/* ratelimit the starting of new threads */
		mod_timer(&slow_work_oom_timer, jiffies + 1);
	}

	mutex_unlock(&slow_work_user_lock);
}
static const struct slow_work_ops slow_work_new_thread_ops = {
	.get_ref	= slow_work_new_thread_get_ref,
	.put_ref	= slow_work_new_thread_put_ref,
	.execute	= slow_work_new_thread_execute,
};
/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
	slow_work_may_not_start_new_thread = false;
}
/**
 * slow_work_register_user - Register a user of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point. This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(void)
{
	struct task_struct *p;
	int loop;

	mutex_lock(&slow_work_user_lock);
	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
		init_completion(&slow_work_last_thread_exited);

		slow_work_threads_should_exit = false;
		slow_work_init(&slow_work_new_thread,
			       &slow_work_new_thread_ops);
		slow_work_may_not_start_new_thread = false;
		slow_work_cull = false;

		/* start the minimum number of threads */
		for (loop = 0; loop < slow_work_min_threads; loop++) {
			atomic_inc(&slow_work_thread_count);
			p = kthread_run(slow_work_thread, NULL, "kslowd");
			if (IS_ERR(p))
				goto error;
		}
		printk(KERN_NOTICE "Slow work thread pool: Ready\n");
	}

	slow_work_user_count++;
	mutex_unlock(&slow_work_user_lock);
	return 0;
error:
	if (atomic_dec_and_test(&slow_work_thread_count))
		complete(&slow_work_last_thread_exited);
	if (loop > 0) {
		printk(KERN_ERR "Slow work thread pool:"
		       " Aborting startup on ENOMEM\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_ERR "Slow work thread pool: Aborted\n");
	}
	mutex_unlock(&slow_work_user_lock);
	return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);
/**
 * slow_work_unregister_user - Unregister a user of the facility
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 */
void slow_work_unregister_user(void)
{
	mutex_lock(&slow_work_user_lock);

	BUG_ON(slow_work_user_count <= 0);

	slow_work_user_count--;
	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_NOTICE "Slow work thread pool:"
		       " Shut down complete\n");
	}

	del_timer_sync(&slow_work_cull_timer);

	mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);
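
/*
 * Illustrative lifecycle sketch, not part of the original file; the
 * my_module_* names are hypothetical.  A module that queues slow work
 * registers as a user of the facility before enqueueing anything and
 * unregisters when it is finished, for example from its init/exit hooks:
 *
 *	static int __init my_module_init(void)
 *	{
 *		return slow_work_register_user();
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		slow_work_unregister_user();
 *	}
 *
 *	module_init(my_module_init);
 *	module_exit(my_module_exit);
 *
 * Unregistering the last user waits for the queues to drain and for all
 * pool threads to exit before returning.
 */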
/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
	unsigned nr_cpus = num_possible_cpus();

	if (nr_cpus > slow_work_max_threads)
		slow_work_max_threads = nr_cpus;
	return 0;
}

subsys_initcall(init_slow_work);