/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
static RAW_NOTIFIER_HEAD(cpu_chain);
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
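/*
 * Illustrative sketch (not part of this file): a typical reader brackets
 * any walk of cpu_online_mask with get_online_cpus()/put_online_cpus(),
 * so no CPU can be unplugged underneath it.  The helper name below is
 * hypothetical.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		setup_per_cpu_state(cpu);	// hypothetical helper
 *	put_online_cpus();
 */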
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}
static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
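/*
 * Illustrative sketch (not part of this file): every hotplug writer runs
 * the same bracketed sequence, which is why at most one writer is active
 * and no reader holds a reference while the masks change:
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	...update cpu_online_mask / cpu_present_mask...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */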
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}
#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}
static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
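/*
 * Illustrative sketch (not part of this file): a subsystem that needs
 * hotplug events registers a notifier_block whose callback switches on
 * the action.  The callback and block names are hypothetical.
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			// set up per-cpu state for 'cpu'
 *			break;
 *		case CPU_DOWN_PREPARE:
 *			// quiesce work targeted at 'cpu'
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 */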
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone. Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
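/*
 * Illustrative note: userspace normally reaches cpu_down() through the
 * sysfs online attribute, e.g.:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 */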
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}
int cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
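/*
 * Illustrative note: the usual callers are the sysfs online attribute
 * ("echo 1 > /sys/devices/system/cpu/cpu1/online") and the resume path
 * via enable_nonboot_cpus() below.
 */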
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */
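/*
 * Illustrative sketch (not part of this file): cpumask_of(cpu) resolves to
 * get_cpu_mask() in <linux/cpumask.h>, which picks the row whose word 0
 * has bit (cpu % BITS_PER_LONG) set and then steps back by
 * (cpu / BITS_PER_LONG) longs, so that bit lands in the right word of the
 * returned mask:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * E.g. on a 64-bit build, cpumask_of(70) picks row 7 (bit 6 set) and steps
 * back one long, yielding a mask with only bit 70 set.  Row 0 being empty
 * is what makes stepping backwards safe.
 */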
/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);
static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}