// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}
/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (pending & SOFTIRQ_NOW_MASK)
                return false;
        return tsk && (tsk->state == TASK_RUNNING) &&
                !__kthread_should_park(tsk);
}
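
/*
 * Note: do_softirq() and invoke_softirq() below gate immediate handling
 * on this check, e.g.
 *
 *	if (pending && !ksoftirqd_running(pending))
 *		do_softirq_own_stack();
 *
 * so softirqs in SOFTIRQ_NOW_MASK (HI and TASKLET) are still handled
 * synchronously even while ksoftirqd is runnable on this CPU.
 */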
#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
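
/*
 * Illustrative consequence (sketch, relying on the generic preempt.h
 * helpers): in_serving_softirq() tests the SOFTIRQ_OFFSET bit and is only
 * true while a softirq handler is actually running, whereas in_softirq()
 * tests the whole SOFTIRQ_MASK and is also true inside a plain
 * local_bh_disable()/local_bh_enable() section.
 */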
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * This is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                lockdep_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
#endif
                trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
        lockdep_assert_irqs_disabled();

        if (preempt_count() == cnt)
                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                lockdep_softirqs_on(_RET_IP_);

        __preempt_count_sub(cnt);
}
/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                lockdep_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
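
/*
 * Usage sketch (illustrative, using the wrappers from
 * include/linux/bottom_half.h): local_bh_disable()/local_bh_enable()
 * bracket code that must not race with softirq handlers on this CPU:
 *
 *	local_bh_disable();
 *	... touch data shared with a softirq handler ...
 *	local_bh_enable();
 *
 * local_bh_enable() ends up in __local_bh_enable_ip() with
 * SOFTIRQ_DISABLE_OFFSET, so softirqs raised inside the critical section
 * are processed (or handed to ksoftirqd) on the way out.
 */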
static inline void invoke_softirq(void)
{
        if (ksoftirqd_running(local_softirq_pending()))
                return;

        if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack that can
                 * be potentially deep already. So call softirq in its own stack
                 * to prevent from any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}
asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending && !ksoftirqd_running(pending))
                do_softirq_own_stack();

        local_irq_restore(flags);
}
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
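
/*
 * Worked example (illustrative): with HZ=1000 the 2 ms budget is two
 * jiffies, but with HZ=100 msecs_to_jiffies(2) rounds up to a single
 * 10 ms jiffy, so the effective time bound is coarser on low-HZ
 * configurations; MAX_SOFTIRQ_RESTART still caps the loop either way.
 */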
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (lockdep_hardirq_context()) {
                in_hardirq = true;
                lockdep_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC as the current task context is borrowed for the
         * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
         * again if the socket is related to swapping.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();

        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        in_hardirq = lockdep_softirq_start();
        account_softirq_enter(current);

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action(h);
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        if (__this_cpu_read(ksoftirqd) == current)
                rcu_softirq_qs();

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        account_softirq_exit(current);
        lockdep_softirq_end(in_hardirq);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        current_restore_flags(old_flags, PF_MEMALLOC);
}
/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
        __irq_enter_raw();

        if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
                tick_irq_enter();

        account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
        rcu_irq_enter();
        irq_enter_rcu();
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_irq())
                        tick_nohz_irq_exit();
        }
#endif
}
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        lockdep_assert_irqs_disabled();
#endif
        account_hardirq_exit(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
}
/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
        __irq_exit_rcu();
        /* must be last! */
        lockdep_hardirq_exit();
}
/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
        __irq_exit_rcu();
        rcu_irq_exit();
        /* must be last! */
        lockdep_hardirq_exit();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}
void __raise_softirq_irqoff(unsigned int nr)
{
        lockdep_assert_irqs_disabled();
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
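
/*
 * Usage sketch (mirrors softirq_init() below): a subsystem registers its
 * handler once at boot time, e.g.
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *
 * and later marks work pending with raise_softirq(TASKLET_SOFTIRQ), or
 * raise_softirq_irqoff() when interrupts are already disabled.
 */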
/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
static void __tasklet_schedule_common(struct tasklet_struct *t,
                                      struct tasklet_head __percpu *headp,
                                      unsigned int softirq_nr)
{
        struct tasklet_head *head;
        unsigned long flags;

        local_irq_save(flags);
        head = this_cpu_ptr(headp);
        t->next = NULL;
        *head->tail = t;
        head->tail = &(t->next);
        raise_softirq_irqoff(softirq_nr);
        local_irq_restore(flags);
}
void __tasklet_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_vec,
                                  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_hi_vec,
                                  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action_common(struct softirq_action *a,
                                  struct tasklet_head *tl_head,
                                  unsigned int softirq_nr)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = tl_head->head;
        tl_head->head = NULL;
        tl_head->tail = &tl_head->head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                if (t->use_callback)
                                        t->callback(t);
                                else
                                        t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *tl_head->tail = t;
                tl_head->tail = &t->next;
                __raise_softirq_irqoff(softirq_nr);
                local_irq_enable();
        }
}
static __latent_entropy void tasklet_action(struct softirq_action *a)
{
        tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
        tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}
void tasklet_setup(struct tasklet_struct *t,
                   void (*callback)(struct tasklet_struct *))
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->callback = callback;
        t->use_callback = true;
        t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);
void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->use_callback = false;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
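
/*
 * Usage sketch (illustrative; my_dev and my_tasklet_fn are made-up names):
 * a driver embeds a tasklet_struct, initializes it once with
 * tasklet_setup(), and schedules it from its interrupt handler:
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tasklet);
 *
 *		// deferred (bottom-half) work for dev goes here
 *	}
 *
 *	tasklet_setup(&dev->tasklet, my_tasklet_fn);
 *	...
 *	tasklet_schedule(&dev->tasklet);
 */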
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirq on inline stack, as we are not deep
                 * in the task stack here.
                 */
                __do_softirq();
                local_irq_enable();
                cond_resched();
                return;
        }
        local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}
static int takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
        return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */
static struct smp_hotplug_thread softirq_threads = {
        .store			= &ksoftirqd,
        .thread_should_run	= ksoftirqd_should_run,
        .thread_fn		= run_ksoftirqd,
        .thread_comm		= "ksoftirqd/%u",
};
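
/*
 * Note (illustrative): smpboot_register_percpu_thread() below spawns one
 * "ksoftirqd/%u" kthread per CPU from this descriptor; each thread is
 * parked/unparked by CPU hotplug and loops calling
 * ksoftirqd_should_run()/run_ksoftirqd() on its own CPU.
 */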
static __init int spawn_ksoftirqd(void)
{
        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
                                  takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}