// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"
enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);
int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}
int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}
int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}
void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}
static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}
static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}
static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

extern void send_call_function_single_ipi(int cpu);
/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible to the IPI handler, which
	 * locks the list to pull the entry off it, before the IPI is sent;
	 * the normal cache coherency rules implied by spinlocks guarantee
	 * this ordering.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should be
	 * added to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);

	return 0;
}
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			func(info);
			csd_unlock(csd);
		} else {
			prev = &csd->llist;
		}
	}

	/*
	 * Second; run all !SYNC callbacks.
	 */
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		csd_unlock(csd);
		func(info);
	}
}
void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	local_irq_restore(flags);
}
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
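
/*
 * Illustrative usage sketch (editor's addition, compiled out; not part of
 * the original file). It shows how a caller might use
 * smp_call_function_single() to read a value on a remote CPU and wait for
 * the result. The example_* identifiers are hypothetical.
 */
#if 0
static void example_read_cpu_id(void *info)
{
	int *id = info;

	/* Runs on the target CPU, in IPI context with interrupts disabled. */
	*id = smp_processor_id();
}

static int example_query_cpu(int cpu)
{
	int id = -1;
	int err;

	/* @wait = 1: returns only after example_read_cpu_id() has run. */
	err = smp_call_function_single(cpu, example_read_cpu_id, &id, 1);

	return err ? err : id;
}
#endif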
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
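
/*
 * Illustrative usage sketch (editor's addition, compiled out). A caller of
 * smp_call_function_single_async() owns the csd and must not resubmit it
 * until the previous invocation has completed; -EBUSY reports that case.
 * The example_* identifiers are hypothetical.
 */
#if 0
static void example_async_fn(void *info)
{
	/*
	 * Runs on the target CPU from the IPI handler; for the !SYNC path
	 * the csd has already been unlocked by the time this runs.
	 */
}

static call_single_data_t example_csd = {
	.func = example_async_fn,
};

static int example_kick_cpu(int cpu)
{
	/* May be called with interrupts disabled; does not wait. */
	return smp_call_function_single_async(cpu, &example_csd);
}
#endif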
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
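
/*
 * Illustrative usage sketch (editor's addition, compiled out). It shows the
 * "run somewhere in this mask, preferably close by" semantics of
 * smp_call_function_any(). The example_* identifiers are hypothetical.
 */
#if 0
static void example_probe_fn(void *info)
{
	*(int *)info = raw_smp_processor_id();
}

static int example_probe_mask(const struct cpumask *mask)
{
	int where = -1;

	/* Prefers the current CPU, then its node, then any online CPU. */
	smp_call_function_any(mask, example_probe_fn, &where, 1);

	return where;
}
#endif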
static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					bool wait, smp_cond_func_t cond_func)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		if (!cond_func || cond_func(cpu, info))
			smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		if (cond_func && !cond_func(cpu, info))
			continue;

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
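
/*
 * Illustrative usage sketch (editor's addition, compiled out). Note that
 * smp_call_function_many() requires preemption to be disabled and never
 * runs @func on the calling CPU. The example_* identifiers are hypothetical.
 */
#if 0
static void example_drain_fn(void *info)
{
	/* Per-CPU work, e.g. draining a local cache; fast and non-blocking. */
}

static void example_drain_others(const struct cpumask *mask)
{
	preempt_disable();
	/* Runs example_drain_fn() on the online CPUs in @mask, minus us. */
	smp_call_function_many(mask, example_drain_fn, NULL, true);
	preempt_enable();
}
#endif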
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);
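
/*
 * Illustrative usage sketch (editor's addition, compiled out).
 * smp_call_function() targets every other online CPU; the caller handles
 * the local CPU itself if needed (or uses on_each_cpu() below). The
 * example_* identifiers are hypothetical.
 */
#if 0
static void example_sync_fn(void *info)
{
	/* e.g. flush a per-CPU buffer; fast and non-blocking. */
}

static void example_sync_all_others(void)
{
	/* @wait = 1: returns after all other CPUs have run the callback. */
	smp_call_function(example_sync_fn, NULL, 1);
}
#endif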
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;

	preempt_disable();
	smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);
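
/*
 * Illustrative usage sketch (editor's addition, compiled out). Unlike
 * smp_call_function(), on_each_cpu() also runs @func on the local CPU, with
 * interrupts disabled. The example_* identifiers are hypothetical.
 */
#if 0
static void example_reset_fn(void *info)
{
	/* Reset some per-CPU state; keep it fast and non-blocking. */
}

static void example_reset_everywhere(void)
{
	on_each_cpu(example_reset_fn, NULL, 1);
}
#endif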
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
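
/*
 * Illustrative usage sketch (editor's addition, compiled out).
 * on_each_cpu_mask() is the masked variant: it runs @func on the online CPUs
 * in @mask and, if the local CPU is in @mask, locally as well. The example_*
 * identifiers are hypothetical.
 */
#if 0
static void example_mask_fn(void *info)
{
	/* Per-CPU work for the CPUs selected by the caller. */
}

static void example_run_on_node(int node)
{
	on_each_cpu_mask(cpumask_of_node(node), example_mask_fn, NULL, true);
}
#endif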
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	int cpu = get_cpu();

	smp_call_function_many_cond(mask, func, info, wait, cond_func);
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
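
/*
 * Illustrative usage sketch (editor's addition, compiled out). The cond_func
 * callback lets a caller skip the IPI for CPUs that have nothing to do.
 * The example_* identifiers are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, example_pending);

static bool example_has_pending(int cpu, void *info)
{
	/* Called with preemption disabled; decide whether to IPI @cpu. */
	return per_cpu(example_pending, cpu) != 0;
}

static void example_flush_fn(void *info)
{
	this_cpu_write(example_pending, 0);
}

static void example_flush_pending(void)
{
	on_each_cpu_cond(example_has_pending, example_flush_fn, NULL, true);
}
#endif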
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 * wake_up_all_idle_cpus tries to break all cpus which are in idle state,
 * including idle-polling cpus; non-idle cpus are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
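
/*
 * Illustrative usage sketch (editor's addition, compiled out). Unlike the
 * IPI-based helpers above, smp_call_on_cpu() runs @func from a workqueue on
 * the target CPU, so @func may sleep; @phys additionally pins the vCPU when
 * running virtualized. The example_* identifiers are hypothetical.
 */
#if 0
static int example_sleepy_fn(void *data)
{
	/* Process context on the target CPU; sleeping is allowed here. */
	might_sleep();
	return 0;
}

static int example_run_on(unsigned int cpu)
{
	return smp_call_on_cpu(cpu, example_sleepy_fn, NULL, false);
}
#endif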