/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Distribute under GPLv2.
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution. Hence, we get a sort of weak CPU binding, though it is
     still not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

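/*
 * Illustrative sketch, not part of this file (values assume the generic
 * preempt_count layout): because local_bh_disable() adds
 * SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) while softirq processing
 * itself adds only SOFTIRQ_OFFSET, the low bit of the softirq field of
 * preempt_count tells the two states apart:
 *
 *      in_softirq()            == softirq_count() != 0
 *                                 (bh disabled *or* serving softirqs)
 *      in_serving_softirq()    == softirq_count() & SOFTIRQ_OFFSET
 *                                 (actually executing softirq handlers)
 */
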
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        preempt_count_add(cnt);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == cnt)
                trace_softirqs_on(_RET_IP_);
        preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip(_RET_IP_);
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

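/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * local_bh_disable()/local_bh_enable() bracket code that must not be
 * interrupted by softirq handlers on this CPU, e.g. when touching per-CPU
 * data that a softirq handler also modifies:
 *
 *      local_bh_disable();
 *      this_cpu_inc(my_stats.rx_packets);      // hypothetical per-CPU counter
 *      local_bh_enable();                      // may run pending softirqs
 *
 * local_bh_enable() processes any softirqs that became pending while they
 * were disabled, unless it is called from interrupt context.
 */
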
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Convoluted means of passing __do_softirq() a message through the various
 * architecture execute_on_stack() bits.
 *
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */
static DEFINE_PER_CPU(int, softirq_from_hardirq);

static inline void lockdep_softirq_from_hardirq(void)
{
        this_cpu_write(softirq_from_hardirq, 1);
}

static inline void lockdep_softirq_start(void)
{
        if (this_cpu_read(softirq_from_hardirq))
                trace_hardirq_exit();
        lockdep_softirq_enter();
}

static inline void lockdep_softirq_end(void)
{
        lockdep_softirq_exit();
        if (this_cpu_read(softirq_from_hardirq)) {
                this_cpu_write(softirq_from_hardirq, 0);
                trace_hardirq_enter();
        }
}

#else
static inline void lockdep_softirq_from_hardirq(void) { }
static inline void lockdep_softirq_start(void) { }
static inline void lockdep_softirq_end(void) { }
#endif

asmlinkage void __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        __u32 pending;
        int cpu;

        /*
         * Mask out PF_MEMALLOC as the current task context is borrowed for
         * the softirq. A softirq handler such as network RX might set
         * PF_MEMALLOC again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
        lockdep_softirq_start();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();

                        kstat_incr_softirqs_this_cpu(vec_nr);

                        trace_softirq_entry(vec_nr);
                        h->action(h);
                        trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %u %s %p "
                                       "with preempt_count %08x, "
                                       "exited with %08x?\n", vec_nr,
                                       softirq_to_name[vec_nr], h->action,
                                       prev_count, preempt_count());
                                preempt_count_set(prev_count);
                        }

                        rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_end();
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                do_softirq_own_stack();

        local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_check_idle(cpu);
                _local_bh_enable();
        }

        __irq_enter();
}

static inline void invoke_softirq(void)
{
        if (!force_irqthreads) {
                lockdep_softirq_from_hardirq();
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack that can
                 * be potentially deep already. So call softirq in its own stack
                 * to prevent from any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_interrupt())
                        tick_nohz_irq_exit();
        }
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        WARN_ON_ONCE(!irqs_disabled());
#endif

        account_irq_exit_time(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
        trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}

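/*
 * Sketch of the registration/raise flow, for reference (the softirq vectors
 * are a fixed enum in <linux/interrupt.h>; new softirqs are rarely added and
 * tasklets are preferred for new code). Networking, for example, does:
 *
 *      // once, at boot:
 *      open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 *
 *      // later, from hardirq context with interrupts disabled:
 *      __raise_softirq_irqoff(NET_RX_SOFTIRQ);
 *
 * The handler then runs from __do_softirq() on irq_exit(), or from
 * ksoftirqd when the softirq load is too high to handle inline.
 */
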
/*
 * Tasklets
 */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}


void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

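/*
 * Illustrative usage sketch (hypothetical driver, not part of this file):
 *
 *      static void my_tasklet_fn(unsigned long data)   // runs in softirq context
 *      {
 *              struct my_dev *dev = (struct my_dev *)data;
 *              // ... deferred bottom-half work ...
 *      }
 *
 *      tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *      ...
 *      tasklet_schedule(&dev->tasklet);        // typically from the irq handler
 *      ...
 *      tasklet_kill(&dev->tasklet);            // on teardown, after the irq is quiesced
 *
 * DECLARE_TASKLET()/DECLARE_TASKLET_DISABLED() can be used instead of
 * tasklet_init() for statically allocated tasklets.
 */
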
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

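/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *      tasklet_hrtimer_init(&my_thr, my_callback,
 *                           CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *      tasklet_hrtimer_start(&my_thr, ktime_set(0, 10 * NSEC_PER_MSEC),
 *                            HRTIMER_MODE_REL);
 *
 * my_callback() is then invoked from HI_SOFTIRQ (tasklet) context instead of
 * hard interrupt context; it may rearm the timer (e.g. via
 * hrtimer_forward_now()) and return HRTIMER_RESTART to keep running.
 */
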
void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirq on inline stack, as we are not deep
                 * in the task stack here.
                 */
                __do_softirq();
                rcu_note_context_switch(cpu);
                local_irq_enable();
                cond_resched();
                return;
        }
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                takeover_tasklets((unsigned long)hcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
        .notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        register_cpu_notifier(&cpu_nfb);

        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);

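/*
 * Note for reference (behaviour of the smpboot infrastructure, sketched with
 * some simplification): smpboot_register_percpu_thread() spawns one
 * "ksoftirqd/N" kthread per possible CPU, and each thread's main loop is
 * roughly:
 *
 *      for (;;) {
 *              if (!ksoftirqd_should_run(cpu))
 *                      schedule();             // sleep until wakeup_softirqd()
 *              else
 *                      run_ksoftirqd(cpu);     // __do_softirq() with irqs off
 *      }
 *
 * (parking and hotplug handling omitted).
 */
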
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}