// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include <trace/events/ipi.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void __flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	__flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

static __always_inline void
send_call_function_single_ipi(int cpu)
{
	if (call_function_single_prep_ipi(cpu)) {
		trace_ipi_send_cpu(cpu, _RET_IP_,
				   generic_smp_call_function_single_interrupt);
		arch_send_call_function_single_ipi(cpu);
	}
}

static __always_inline void
send_call_function_ipi_mask(struct cpumask *mask)
{
	trace_ipi_send_cpumask(mask, _RET_IP_,
			       generic_smp_call_function_single_interrupt);
	arch_send_call_function_ipi_mask(mask);
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);

/*
 * Parse the csdlock_debug= kernel boot parameter.
 *
 * If you need to restore the old "ext" value that once provided
 * additional debugging information, reapply the following commits:
 *
 * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging")
 * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging")
 */
static int __init csdlock_debug(char *str)
{
	int ret;
	unsigned int val = 0;

	ret = get_option(&str, &val);
	if (ret) {
		if (val)
			static_branch_enable(&csdlock_debug_enabled);
		else
			static_branch_disable(&csdlock_debug_enabled);
	}

	return 1;
}
__setup("csdlock_debug=", csdlock_debug);
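
/*
 * Illustrative examples (not part of the original file) of the boot parameter
 * parsed above, as it would appear on the kernel command line:
 *
 *	csdlock_debug=1		- enable CSD lock wait debugging
 *	csdlock_debug=0		- disable it (the default unless
 *				  CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT is set)
 */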

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

static ulong csd_lock_timeout = 5000;  /* CSD lock timeout in milliseconds. */
module_param(csd_lock_timeout, ulong, 0444);

static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(struct __call_single_data *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);
	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(struct __call_single_data *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}

#else
static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * We have to check the type of the CSD before queueing it, because
	 * once queued it can have its flags cleared by
	 *   flush_smp_call_function_queue()
	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
	 * executes migration_cpu_stop() on the remote CPU).
	 */
	if (trace_ipi_send_cpu_enabled()) {
		call_single_data_t *csd;
		smp_call_func_t func;

		csd = container_of(node, call_single_data_t, node.llist);
		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
			sched_ttwu_pending : csd->func;

		trace_ipi_send_cpu(cpu, _RET_IP_, func);
	}

	/*
	 * The list addition should be visible to the target CPU when it pops
	 * the head of the list to pull the entry off it in the IPI handler
	 * because of normal cache coherency rules implied by the underlying
	 * llist operations.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	__flush_smp_call_function_queue(true);
}

/**
 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void __flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && entry != NULL)) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *				   from task context (idle, migration thread)
 *
 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
 * handle queued SMP function calls before scheduling.
 *
 * The migration thread has to ensure that a possibly pending wakeup has
 * been handled before it migrates a task.
 */
void flush_smp_call_function_queue(void)
{
	unsigned int was_pending;
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	/* Get the already pending soft interrupts for RT enabled kernels */
	was_pending = local_softirq_pending();
	__flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq_post_smp_call_flush(was_pending);

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
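
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * smp_call_function_single(). The example_read_remote_cpu() and
 * example_sample_cpu() names are hypothetical; only the
 * smp_call_function_single() call itself is the documented API.
 */
static void example_read_remote_cpu(void *info)
{
	/* Runs on the target CPU, in IPI context with interrupts disabled. */
	*(unsigned int *)info = raw_smp_processor_id();
}

static int __maybe_unused example_sample_cpu(int cpu)
{
	unsigned int seen = 0;

	/* wait=1: return only after the callback has run on @cpu. */
	return smp_call_function_single(cpu, example_read_remote_cpu, &seen, 1);
}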

/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *				      specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 *
 * Return: %0 on success or negative errno value on error
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
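
/*
 * Illustrative sketch (not part of the original file): a caller-owned csd used
 * with smp_call_function_single_async(). The example_* names are hypothetical;
 * INIT_CSD() is the initializer provided by <linux/smp.h>. The csd must not be
 * reused until the previous request has been processed; a second call while it
 * is still pending returns -EBUSY, as described above.
 */
struct example_async_state {
	call_single_data_t	csd;
	unsigned long		events;
};

static void example_async_fn(void *info)
{
	struct example_async_state *state = info;

	state->events++;	/* runs on the remote CPU, IRQs disabled */
}

static void __maybe_unused example_async_init(struct example_async_state *state)
{
	state->events = 0;
	INIT_CSD(&state->csd, example_async_fn, state);
}

static int __maybe_unused example_async_kick(int cpu, struct example_async_state *state)
{
	return smp_call_function_single_async(cpu, &state->csd);
}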

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
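
/*
 * Illustrative sketch (not part of the original file): smp_call_function_any()
 * picks a convenient CPU from the mask (the local CPU or a same-node CPU when
 * possible), following the selection preference documented above. The
 * example_* names are hypothetical.
 */
static void example_touch(void *info)
{
	atomic_inc(info);	/* executed on whichever CPU was selected */
}

static int __maybe_unused example_touch_any(const struct cpumask *mask)
{
	static atomic_t touches = ATOMIC_INIT(0);

	/* wait=1: the increment is visible once this returns. */
	return smp_call_function_any(mask, example_touch, &touches, 1);
}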

/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:		Wait until function execution is completed
 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 */
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					unsigned int scf_flags,
					smp_cond_func_t cond_func)
{
	int cpu, last_cpu, this_cpu = smp_processor_id();
	struct call_function_data *cfd;
	bool wait = scf_flags & SCF_WAIT;
	int nr_cpus = 0, nr_queued = 0;
	bool run_remote = false;
	bool run_local = false;

	lockdep_assert_preemption_disabled();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	if (cpu_online(this_cpu) && !oops_in_progress &&
	    !early_boot_irqs_disabled)
		lockdep_assert_irqs_enabled();

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
		run_local = true;

	/* Check if we need remote execution, i.e., any CPU excluding this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		run_remote = true;

	if (run_remote) {
		cfd = this_cpu_ptr(&cfd_data);
		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
		__cpumask_clear_cpu(this_cpu, cfd->cpumask);

		cpumask_clear(cfd->cpumask_ipi);
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

			if (cond_func && !cond_func(cpu, info)) {
				__cpumask_clear_cpu(cpu, cfd->cpumask);
				continue;
			}

			csd_lock(csd);
			if (wait)
				csd->node.u_flags |= CSD_TYPE_SYNC;
			csd->func = func;
			csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
			csd->node.src = smp_processor_id();
			csd->node.dst = cpu;
#endif
			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
				nr_cpus++;
				last_cpu = cpu;
			}
			nr_queued++;
		}

		/*
		 * Trace each smp_function_call_*() as an IPI, actual IPIs
		 * will be traced with func==generic_smp_call_function_single_ipi().
		 */
		if (nr_queued)
			trace_ipi_send_cpumask(cfd->cpumask, _RET_IP_, func);

		/*
		 * Choose the most efficient way to send an IPI. Note that the
		 * number of CPUs might be zero due to concurrent changes to the
		 * provided mask.
		 */
		if (nr_cpus == 1)
			send_call_function_single_ipi(last_cpu);
		else if (likely(nr_cpus > 1))
			send_call_function_ipi_mask(cfd->cpumask_ipi);
	}

	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}

	if (run_remote && wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs. Note that @func is never run on the local
 *        CPU; use on_each_cpu_mask() if local execution is needed.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
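
/*
 * Illustrative sketch (not part of the original file): run a callback on every
 * online CPU in a caller-provided mask (excluding the calling CPU) and wait
 * for completion. Preemption must be disabled around the call, as noted above;
 * the example_* names are hypothetical.
 */
static void example_per_cpu_work(void *info)
{
	/* Per-CPU work goes here; runs in IPI context with IRQs disabled. */
}

static void __maybe_unused example_run_on_mask(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, example_per_cpu_work, NULL, true);
	preempt_enable();
}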

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);
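
/*
 * Illustrative sketch (not part of the original file): smp_call_function()
 * targets every other online CPU; the calling CPU is not included, so callers
 * that also need local execution typically use on_each_cpu() instead. The
 * example_* names and the per-CPU counter are hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, example_remote_calls);

static void example_count_call(void *info)
{
	this_cpu_inc(example_remote_calls);
}

static void __maybe_unused example_count_on_others(void)
{
	/* wait=1: all remote CPUs have run example_count_call() on return. */
	smp_call_function(example_count_call, NULL, 1);
}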

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is a hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		set_nr_cpu_ids(nr_cpus);

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
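
/*
 * Illustrative examples (not part of the original file) of the boot options
 * handled above, as they would appear on the kernel command line:
 *
 *	nosmp		- boot with only the boot CPU, SMP disabled
 *	maxcpus=4	- bring up at most 4 CPUs during boot
 *	nr_cpus=8	- hard-limit the number of possible CPU ids to 8
 */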

#if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
#endif

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * on_each_cpu_cond_mask(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @mask:	The cpumask of CPUs to consider; only the online
 *		subset will be used.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned int scf_flags = SCF_RUN_LOCAL;

	if (wait)
		scf_flags |= SCF_WAIT;

	preempt_disable();
	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
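
/*
 * Illustrative sketch (not part of the original file): only send an IPI to
 * CPUs for which the condition callback returns true. The example_* names and
 * the per-CPU "dirty" flag are hypothetical.
 */
static DEFINE_PER_CPU(bool, example_dirty);

static bool example_needs_flush(int cpu, void *info)
{
	/* Called with preemption disabled; decide whether to IPI @cpu. */
	return per_cpu(example_dirty, cpu);
}

static void example_do_flush(void *info)
{
	this_cpu_write(example_dirty, false);
}

static void __maybe_unused example_flush_dirty_cpus(void)
{
	on_each_cpu_cond_mask(example_needs_flush, example_do_flush, NULL, true,
			      cpu_online_mask);
}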

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
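
/*
 * Illustrative sketch (not part of the original file) of the pattern described
 * above: publish a new handler pointer, then use kick_all_cpus_sync() to
 * guarantee that no CPU is still running code reached through the old pointer.
 * example_handler and example_switch_handler() are hypothetical.
 */
static void (*example_handler)(void);

static void __maybe_unused example_switch_handler(void (*new_handler)(void))
{
	WRITE_ONCE(example_handler, new_handler);
	/* Every CPU has taken the dummy IPI before this returns. */
	kick_all_cpus_sync();
}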

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wakes all CPUs that are currently idle, including CPUs that are polling
 * in idle rather than waiting for an interrupt. Non-idle CPUs are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		preempt_disable();
		if (cpu != smp_processor_id() && cpu_online(cpu))
			wake_up_if_idle(cpu);
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: &work_struct
 * @done: &completion to signal
 * @func: function to call
 * @data: function's data argument
 * @ret: return value from @func
 * @cpu: target CPU (%-1 for any CPU)
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
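
/*
 * Illustrative sketch (not part of the original file): unlike the IPI-based
 * helpers above, smp_call_on_cpu() runs the callback from a workqueue worker
 * bound to the target CPU, so the callback runs in process context and may
 * sleep. The example_* names are hypothetical.
 */
static int example_slow_probe(void *data)
{
	/* Runs in process context on the chosen CPU; sleeping is allowed. */
	*(int *)data = raw_smp_processor_id();
	return 0;
}

static int __maybe_unused example_probe_cpu(unsigned int cpu)
{
	int seen = -1;

	/* phys=false: no vCPU pinning is requested. */
	return smp_call_on_cpu(cpu, example_slow_probe, &seen, false) ?: seen;
}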