// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
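/*
 * A minimal sketch of the pattern described above; "my_work_list" and
 * "my_softirq_handler" are hypothetical names, not vectors owned by this
 * file. Keeping the queue strictly per CPU is what makes the "no shared
 * variables" rule work without any global lock:
 *
 *	static DEFINE_PER_CPU(struct llist_head, my_work_list);
 *
 *	static void my_softirq_handler(struct softirq_action *h)
 *	{
 *		// Only this CPU queues onto and drains its own list, so
 *		// no cross-CPU serialization is needed here.
 *		struct llist_node *batch = llist_del_all(this_cpu_ptr(&my_work_list));
 *
 *		// process 'batch' ...
 *	}
 */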
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk)
		wake_up_process(tsk);
}
#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif
/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirq_disable_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 *
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
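/*
 * A worked example of the accounting above, assuming the usual !RT
 * preempt_count layout where SOFTIRQ_OFFSET is 0x100 and therefore
 * SOFTIRQ_DISABLE_OFFSET is 0x200 (illustrative values, the authoritative
 * definitions live in <linux/preempt.h>):
 *
 *	local_bh_disable();	// softirq part of count: 0x000 -> 0x200
 *	local_bh_disable();	//                        0x200 -> 0x400
 *	local_bh_enable();	//                        0x400 -> 0x200
 *	local_bh_enable();	//                        0x200 -> 0x000
 *
 * Softirq processing itself adds the single SOFTIRQ_OFFSET, so an odd
 * multiple of SOFTIRQ_OFFSET means "currently serving a softirq", while
 * an even, non-zero multiple only means "bottom halves disabled".
 */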
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirq() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};
/**
 * local_bh_blocked() - Check for idle whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0 otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() et al. work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending softirqs or in waking up ksoftirqd.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending)
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);
void softirq_preempt(void)
{
	if (WARN_ON_ONCE(!preemptible()))
		return;

	if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
		return;

	__local_bh_enable(SOFTIRQ_OFFSET, true);
	/* preemption point */
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}
/*
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}
static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}
/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionality
 * in the block layer which does this is disabled on RT. If soft interrupts
 * get raised which haven't been raised before the flush, warn so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
		invoke_softirq();
}

#else /* CONFIG_PREEMPT_RT */
/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_hardirq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_hardirq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
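/*
 * For reference, the public wrappers in <linux/bottom_half.h> funnel into
 * the two functions above roughly like this (simplified sketch; the real
 * wrappers also cover the !TRACE_IRQFLAGS fast path):
 *
 *	local_bh_disable() -> __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET)
 *	local_bh_enable()  -> __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET)
 *
 * so a typical caller looks like:
 *
 *	local_bh_disable();
 *	// touch data shared with a softirq handler on this CPU
 *	local_bh_enable();	// may run pending softirqs right here
 */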
static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}
static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}
static inline void invoke_softirq(void)
{
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
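/*
 * Concretely (illustrative arithmetic, not additional tunables): with
 * HZ=1000, MAX_SOFTIRQ_TIME is msecs_to_jiffies(2) = 2 jiffies, while with
 * HZ=250 or HZ=100 it rounds up to a single jiffy. __do_softirq() below
 * therefore defers to ksoftirqd as soon as the first of three conditions
 * triggers: the time budget is spent, need_resched() is set, or the
 * pending bitmask has been rescanned MAX_SOFTIRQ_RESTART times.
 */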
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */
static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
	    __this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}
/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (tick_nohz_full_cpu(smp_processor_id()) ||
	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	ct_irq_enter();
	irq_enter_rcu();
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}
#ifdef CONFIG_PREEMPT_RT
DEFINE_PER_CPU(struct task_struct *, timersd);
DEFINE_PER_CPU(unsigned long, pending_timer_softirq);

static void wake_timersd(void)
{
	struct task_struct *tsk = __this_cpu_read(timersd);

	if (tsk)
		wake_up_process(tsk);
}

#else

static inline void wake_timersd(void) { }

#endif
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers() &&
	    !(in_nmi() | in_hardirq()))
		wake_timersd();

	tick_irq_exit();
}
/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	ct_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
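/*
 * Usage sketch. The softirq vectors are a fixed, compile-time set, so
 * "MY_SOFTIRQ" and "my_subsys_softirq" below are purely hypothetical
 * names standing in for one of the existing NR_SOFTIRQS entries:
 *
 *	static void my_subsys_softirq(struct softirq_action *h)
 *	{
 *		// drain this CPU's work with bottom halves active
 *	}
 *
 *	// once, at boot/init time:
 *	open_softirq(MY_SOFTIRQ, my_subsys_softirq);
 *
 *	// later, typically from hard interrupt context:
 *	raise_softirq(MY_SOFTIRQ);	// or raise_softirq_irqoff() when
 *					// interrupts are already disabled
 */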
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}
static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback) {
						trace_tasklet_entry(t, t->callback);
						t->callback(t);
						trace_tasklet_exit(t, t->callback);
					} else {
						trace_tasklet_entry(t, t->func);
						t->func(t->data);
						trace_tasklet_exit(t, t->func);
					}
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}
void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
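/*
 * Typical driver-side use of the two init flavours above ("my_dev" and
 * "my_tasklet_fn" are made-up names). The modern tasklet_setup() form
 * hands the tasklet_struct itself to the callback and the enclosing
 * object is recovered with container_of()/from_tasklet():
 *
 *	struct my_dev {
 *		struct tasklet_struct tl;
 *		// ...
 *	};
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = container_of(t, struct my_dev, tl);
 *		// bottom-half work for 'dev'
 *	}
 *
 *	tasklet_setup(&dev->tl, my_tasklet_fn);	// once, e.g. at probe time
 *	tasklet_schedule(&dev->tl);		// from the device's hardirq handler
 */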
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when the current task
			 * preempted soft interrupt processing or prevents
			 * ksoftirqd from running. If the tasklet runs on a
			 * different CPU then this has no effect other than
			 * doing the BH disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);
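/*
 * Teardown sketch for a hypothetical "dev" embedding a tasklet: the
 * scheduling source has to be quiesced first, otherwise tasklet_kill()
 * can wait forever on a tasklet that keeps getting re-raised:
 *
 *	// stop whatever schedules it, e.g. disable_irq()/free_irq()
 *	tasklet_kill(&dev->tl);		// waits until it is neither
 *					// scheduled nor running
 *	kfree(dev);			// now it is safe to free
 */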
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif
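/*
 * How the RUN bit ties these helpers together, roughly (tasklet_trylock()
 * itself lives in <linux/interrupt.h>, shown only for illustration):
 *
 *	tasklet_trylock()	-> !test_and_set_bit(TASKLET_STATE_RUN, ...)
 *	   handler executes on exactly one CPU at a time
 *	tasklet_unlock()	-> clear RUN, then wake_up_var()
 *	tasklet_unlock_wait()	-> sleep until RUN is clear (used by tasklet_kill())
 */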
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	ksoftirqd_run_begin();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
		__do_softirq();
		ksoftirqd_run_end();
		cond_resched();
		return;
	}
	ksoftirqd_run_end();
}
#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */
static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};
#ifdef CONFIG_PREEMPT_RT
static void timersd_setup(unsigned int cpu)
{
	sched_set_fifo_low(current);
}

static int timersd_should_run(unsigned int cpu)
{
	return local_pending_timers();
}

static void run_timersd(unsigned int cpu)
{
	unsigned int timer_si;

	ksoftirqd_run_begin();

	timer_si = local_pending_timers();
	__this_cpu_write(pending_timer_softirq, 0);
	or_softirq_pending(timer_si);

	__do_softirq();

	ksoftirqd_run_end();
}

static void raise_ktimers_thread(unsigned int nr)
{
	trace_softirq_raise(nr);
	__this_cpu_or(pending_timer_softirq, 1 << nr);
}

void raise_hrtimer_softirq(void)
{
	raise_ktimers_thread(HRTIMER_SOFTIRQ);
}

void raise_timer_softirq(void)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_ktimers_thread(TIMER_SOFTIRQ);
	wake_timersd();
	local_irq_restore(flags);
}

static struct smp_hotplug_thread timer_threads = {
	.store			= &timersd,
	.setup			= timersd_setup,
	.thread_should_run	= timersd_should_run,
	.thread_fn		= run_timersd,
	.thread_comm		= "ktimers/%u",
};
#endif
static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
#ifdef CONFIG_PREEMPT_RT
	BUG_ON(smpboot_register_percpu_thread(&timer_threads));
#endif

	return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}