/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Distribute under GPLv2.
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
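
/*
 * Illustrative note (added, not part of the original file): both states are
 * visible in softirq_count() alone, which is what the existing helpers
 * already encode:
 *
 *	softirq_count() & SOFTIRQ_OFFSET	nonzero only while a softirq
 *						handler is running (in_serving_softirq())
 *	softirq_count() != 0			nonzero whenever bh is disabled,
 *						including the case above (in_softirq())
 */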
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	preempt_count_add(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(local_bh_disable);
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/* Are softirqs going to be turned on now: */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/* Keep preemption disabled until we are done with softirq processing: */
	preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip(_RET_IP_);
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
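
/*
 * Usage sketch (added, not from the original file): a typical caller brackets
 * access to data shared with a softirq or tasklet handler, which keeps that
 * handler off this CPU until the matching enable:
 *
 *	local_bh_disable();
 *	... touch state also used by a softirq/tasklet handler ...
 *	local_bh_enable();	(may run pending softirqs before returning)
 */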
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
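
/*
 * For scale (added clarification, not from the original file):
 * msecs_to_jiffies() rounds up, so MAX_SOFTIRQ_TIME is 2 jiffies at HZ=1000
 * but a single jiffy at HZ=100 or HZ=250. When jiffies stops advancing
 * (e.g. under stop_machine()), only the MAX_SOFTIRQ_RESTART counter still
 * bounds the processing loop.
 */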
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Convoluted means of passing __do_softirq() a message through the various
 * architecture execute_on_stack() bits.
 *
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */
static DEFINE_PER_CPU(int, softirq_from_hardirq);

static inline void lockdep_softirq_from_hardirq(void)
{
	this_cpu_write(softirq_from_hardirq, 1);
}

static inline void lockdep_softirq_start(void)
{
	if (this_cpu_read(softirq_from_hardirq))
		trace_hardirq_exit();
	lockdep_softirq_enter();
}

static inline void lockdep_softirq_end(void)
{
	lockdep_softirq_exit();
	if (this_cpu_read(softirq_from_hardirq)) {
		this_cpu_write(softirq_from_hardirq, 0);
		trace_hardirq_enter();
	}
}

#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void lockdep_softirq_from_hardirq(void) { }
static inline void lockdep_softirq_start(void) { }
static inline void lockdep_softirq_end(void) { }
#endif /* CONFIG_TRACE_IRQFLAGS */
asmlinkage void __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	__u32 pending;
	int cpu;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler such as network RX might set PF_MEMALLOC
	 * again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
	lockdep_softirq_start();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();
	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count_set(prev_count);
			}
			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end();
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	if (pending)
		do_softirq_own_stack();
	local_irq_restore(flags);
}
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);
		_local_bh_enable();
	}
	__irq_enter();
}
static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
		lockdep_softirq_from_hardirq();
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
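
/*
 * Usage sketch (added, not from the original file): a subsystem that owns one
 * of the fixed entries in the softirq enum registers its handler once at init
 * time and then raises the softirq when there is work. "MY_SOFTIRQ" and
 * my_softirq_action() are hypothetical names; adding a new entry also means
 * extending NR_SOFTIRQS, which is why most code uses tasklets instead.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		... runs in bh context on the CPU that raised it ...
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	(at init time)
 *	raise_softirq(MY_SOFTIRQ);			(mark pending, wake ksoftirqd if needed)
 */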
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
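
/*
 * Usage sketch (added, not from the original file): the typical tasklet
 * lifecycle in a driver. "my_tasklet" and my_tasklet_fn() are hypothetical
 * names used only for illustration.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		... bottom-half work, runs in softirq context ...
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	tasklet_schedule(&my_tasklet);	(e.g. from the hard irq handler)
 *	...
 *	tasklet_kill(&my_tasklet);	(on teardown, before freeing the data it uses)
 */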
/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
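
/*
 * Usage sketch (added, not from the original file): arm the combo with the
 * tasklet_hrtimer_start() helper so the callback runs in softirq context via
 * the HI tasklet rather than in hard irq context. "my_ttimer" and
 * my_timer_cb() are hypothetical names.
 *
 *	static struct tasklet_hrtimer my_ttimer;
 *
 *	static enum hrtimer_restart my_timer_cb(struct hrtimer *timer)
 *	{
 *		... softirq-context work ...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_ttimer, my_timer_cb,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_ttimer, ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */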
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq in-line on this stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};
static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}