/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	bool			booted_once;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail.
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;

	complete(done);
}

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
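
/*
 * Example (illustrative sketch, not part of this file; the function name is
 * hypothetical): a caller that must keep the set of online CPUs stable while
 * walking it brackets the walk with the read side of the hotplug lock:
 *
 *	static unsigned int count_online_cpus_stable(void)
 *	{
 *		unsigned int cpu, cnt = 0;
 *
 *		cpus_read_lock();
 *		for_each_online_cpu(cpu)
 *			cnt++;
 *		cpus_read_unlock();
 *		return cnt;
 *	}
 */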

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
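
/*
 * Usage note (illustrative sketch, function name hypothetical): disable and
 * enable must be balanced; while disabled, cpu_up()/cpu_down() return -EBUSY:
 *
 *	static void my_hotplug_blocked_section(void)
 *	{
 *		cpu_hotplug_disable();
 *		... work that must not race with CPU hotplug ...
 *		cpu_hotplug_enable();
 *	}
 */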
#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);
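
/*
 * Usage note (boot command line): "nosmt" disables SMT but leaves it
 * re-enableable at runtime via sysfs, while "nosmt=force" disables it for
 * the lifetime of the boot:
 *
 *	linux ... nosmt
 *	linux ... nosmt=force
 */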

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
	 */
	return !per_cpu(cpuhp_state, cpu).booted_once;
}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() in cpuhp_thread_fun().
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in cpu_notify_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			if (can_rollback_cpu(st)) {
				st->target = prev_state;
				undo_cpu_up(cpu, st);
			}
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

	cpuhp_lock_release(bringup);

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++)
		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			if (st->state < prev_state)
				undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);

#else
#define takedown_cpu		NULL
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	st->booted_once = true;
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:prepare",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WATCHDOG_ONLINE] = {
		.name			= "lockup_detector:online",
		.startup.single		= lockup_detector_online_cpu,
		.teardown.single	= lockup_detector_offline_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot in the dynamic state range. The states are protected
 * by the cpuhp_state_mutex and an empty slot is identified by having no name
 * assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}

int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
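
/*
 * Example (illustrative sketch, names hypothetical): a multi-instance state
 * embeds a struct hlist_node in each instance. The state itself is first set
 * up with multi_instance = true (e.g. via cpuhp_setup_state_multi() from
 * <linux/cpuhotplug.h>), then instances are registered one by one:
 *
 *	struct my_instance {
 *		struct hlist_node node;
 *	};
 *
 *	static int my_inst_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct my_instance *inst =
 *			hlist_entry(node, struct my_instance, node);
 *
 *		... per (cpu, instance) setup ...
 *		return 0;
 *	}
 *
 *	err = cpuhp_state_add_instance(my_state, &inst->node);
 */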

/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
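
/*
 * Example (illustrative sketch, names hypothetical): typical users go
 * through the cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>, which
 * takes the cpus read lock itself. Requesting CPUHP_AP_ONLINE_DYN returns
 * the dynamically allocated state number:
 *
 *	static int my_cpu_online(unsigned int cpu)
 *	{
 *		... bring per-cpu resources up ...
 *		return 0;
 *	}
 *
 *	static int my_cpu_offline(unsigned int cpu)
 *	{
 *		... tear per-cpu resources down ...
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/my:online",
 *				my_cpu_online, my_cpu_offline);
 *	if (ret < 0)
 *		return ret;
 *	my_state = ret;	// keep for later removal
 */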

int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
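
/*
 * Example (continuing the sketch above): tearing a dynamically allocated
 * state down again, e.g. on module exit, which invokes the teardown
 * callback on every CPU that is at or above the state before the slot is
 * freed:
 *
 *	cpuhp_remove_state(my_state);
 */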

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
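
/*
 * Usage note (illustrative; arbitrary intermediate states additionally
 * require CONFIG_CPU_HOTPLUG_STATE_CONTROL): writing a state number to
 * "target" walks the CPU to that state, e.g. fully offlining CPU1 from a
 * root shell:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 */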

static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
		return -EINVAL;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
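
/*
 * Usage note (illustrative): the "fail" attribute injects a callback
 * failure at the given state on the next hotplug transition, e.g. from a
 * root shell:
 *
 *	echo 42 > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	echo 0  > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * The state number 42 is a placeholder; the valid numbers can be read from
 * /sys/devices/system/cpu/hotplug/states.
 */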

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

#ifdef CONFIG_HOTPLUG_SMT

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
}

static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret)
		cpu_smt_control = ctrlval;
	cpu_maps_update_done();
	return ret;
}

int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}

static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
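
/*
 * Usage note (illustrative): SMT control from user space, e.g. from a root
 * shell:
 *
 *	echo off      > /sys/devices/system/cpu/smt/control
 *	echo on       > /sys/devices/system/cpu/smt/control
 *	echo forceoff > /sys/devices/system/cpu/smt/control
 *
 * Once "forceoff" has been written, further writes fail with -EPERM.
 */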

static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool active = topology_max_smt_threads() > 1;

	return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
};

static int __init cpu_smt_state_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

#else
static inline int cpu_smt_state_init(void) { return 0; }
#endif

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_state_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
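
/*
 * Example (illustrative sketch): cpumask_of(cpu) indexes into this table to
 * hand out a constant single-bit mask without any allocation:
 *
 *	const struct cpumask *mask = cpumask_of(3);
 *
 * "mask" now has only bit 3 set and is usable wherever a read-only cpumask
 * is expected.
 */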

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	this_cpu_write(cpuhp_state.booted_once, true);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}

/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

static enum cpu_mitigations cpu_mitigations __ro_after_init =
	CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
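
/*
 * Usage note (boot command line):
 *
 *	linux ... mitigations=off         - disable all optional mitigations
 *	linux ... mitigations=auto        - default behaviour
 *	linux ... mitigations=auto,nosmt  - as auto, but also disable SMT if
 *	                                    a mitigation requires it
 */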

/* mitigations=off */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);

/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);