1 /*
2  *      linux/kernel/softirq.c
3  *
4  *      Copyright (C) 1992 Linus Torvalds
5  *
6  *      Distribute under GPLv2.
7  *
8  *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
9  */
10
11 #include <linux/export.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/interrupt.h>
14 #include <linux/init.h>
15 #include <linux/mm.h>
16 #include <linux/notifier.h>
17 #include <linux/percpu.h>
18 #include <linux/cpu.h>
19 #include <linux/freezer.h>
20 #include <linux/kthread.h>
21 #include <linux/rcupdate.h>
22 #include <linux/ftrace.h>
23 #include <linux/smp.h>
24 #include <linux/smpboot.h>
25 #include <linux/tick.h>
26
27 #define CREATE_TRACE_POINTS
28 #include <trace/events/irq.h>
29
30 /*
31    - No shared variables, all the data are CPU local.
32    - If a softirq needs serialization, let it serialize itself
33      by its own spinlocks.
34    - Even if a softirq is serialized, only the local cpu is marked for
35      execution. Hence, we get a sort of weak cpu binding. It is
36      still not clear whether this results in better locality
37      or not.
38
39    Examples:
40    - NET RX softirq. It is multithreaded and does not require
41      any global serialization.
42    - NET TX softirq. It kicks software netdevice queues, hence
43      it is logically serialized per device, but this serialization
44      is invisible to common code.
45    - Tasklets: each tasklet is serialized with respect to itself.
46  */
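/*
 * A minimal sketch of the rules above, using hypothetical names
 * (example_list, example_action) that do not appear anywhere in this file:
 * keep the working data per-CPU, detach it with interrupts disabled, and
 * process it locally so no cross-CPU lock is needed in the common case
 * (assuming the per-cpu list heads are initialized at boot).
 *
 *	static DEFINE_PER_CPU(struct list_head, example_list);
 *
 *	static void example_action(struct softirq_action *h)
 *	{
 *		LIST_HEAD(work);
 *
 *		local_irq_disable();
 *		list_splice_init(this_cpu_ptr(&example_list), &work);
 *		local_irq_enable();
 *
 *		// process "work" here, with interrupts enabled; the list was
 *		// filled on this CPU, so other CPUs never touch it
 *	}
 *
 * This mirrors what tasklet_action() below does with its per-cpu lists.
 */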
47
48 #ifndef __ARCH_IRQ_STAT
49 irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
50 EXPORT_SYMBOL(irq_stat);
51 #endif
52
53 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
54
55 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
56
57 char *softirq_to_name[NR_SOFTIRQS] = {
58         "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59         "TASKLET", "SCHED", "HRTIMER", "RCU"
60 };
61
62 /*
63  * We cannot loop indefinitely here to avoid userspace starvation,
64  * but we also don't want to introduce a worst-case 1/HZ latency
65  * to the pending events, so let the scheduler balance
66  * the softirq load for us.
67  */
68 static void wakeup_softirqd(void)
69 {
70         /* Interrupts are disabled: no need to stop preemption */
71         struct task_struct *tsk = __this_cpu_read(ksoftirqd);
72
73         if (tsk && tsk->state != TASK_RUNNING)
74                 wake_up_process(tsk);
75 }
76
77 /*
78  * preempt_count and SOFTIRQ_OFFSET usage:
79  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
80  *   softirq processing.
81  * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
82  *   on local_bh_disable or local_bh_enable.
83  * This lets us distinguish between whether we are currently processing
84  * softirq and whether we just have bh disabled.
85  */
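/*
 * Illustration of that distinction (hypothetical helper, not called anywhere
 * in this file): in_serving_softirq() tests the SOFTIRQ_OFFSET bit that is
 * set while a handler runs, whereas in_softirq() is also true when bhs are
 * merely disabled via local_bh_disable().
 *
 *	static void example_report_context(void)
 *	{
 *		if (in_serving_softirq())
 *			pr_debug("running inside a softirq handler\n");
 *		else if (in_softirq())
 *			pr_debug("softirqs only disabled (local_bh_disable)\n");
 *		else
 *			pr_debug("plain task context\n");
 *	}
 */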
86
87 /*
88  * This one is for softirq.c-internal use,
89  * where hardirqs are disabled legitimately:
90  */
91 #ifdef CONFIG_TRACE_IRQFLAGS
92 static void __local_bh_disable(unsigned long ip, unsigned int cnt)
93 {
94         unsigned long flags;
95
96         WARN_ON_ONCE(in_irq());
97
98         raw_local_irq_save(flags);
99         /*
100          * The preempt tracer hooks into preempt_count_add and will break
101          * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
102          * is set and before current->softirq_enabled is cleared.
103          * We must manually increment preempt_count here and manually
104          * call the trace_preempt_off later.
105          */
106         __preempt_count_add(cnt);
107         /*
108          * Were softirqs turned off above:
109          */
110         if (softirq_count() == cnt)
111                 trace_softirqs_off(ip);
112         raw_local_irq_restore(flags);
113
114         if (preempt_count() == cnt)
115                 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
116 }
117 #else /* !CONFIG_TRACE_IRQFLAGS */
118 static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
119 {
120         preempt_count_add(cnt);
121         barrier();
122 }
123 #endif /* CONFIG_TRACE_IRQFLAGS */
124
125 void local_bh_disable(void)
126 {
127         __local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
128 }
129
130 EXPORT_SYMBOL(local_bh_disable);
131
132 static void __local_bh_enable(unsigned int cnt)
133 {
134         WARN_ON_ONCE(!irqs_disabled());
135
136         if (softirq_count() == cnt)
137                 trace_softirqs_on(_RET_IP_);
138         preempt_count_sub(cnt);
139 }
140
141 /*
142  * Special-case - softirqs can safely be enabled in
143  * cond_resched_softirq(), or by __do_softirq(),
144  * without processing still-pending softirqs:
145  */
146 void _local_bh_enable(void)
147 {
148         WARN_ON_ONCE(in_irq());
149         __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
150 }
151
152 EXPORT_SYMBOL(_local_bh_enable);
153
154 static inline void _local_bh_enable_ip(unsigned long ip)
155 {
156         WARN_ON_ONCE(in_irq() || irqs_disabled());
157 #ifdef CONFIG_TRACE_IRQFLAGS
158         local_irq_disable();
159 #endif
160         /*
161          * Are softirqs going to be turned on now:
162          */
163         if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
164                 trace_softirqs_on(ip);
165         /*
166          * Keep preemption disabled until we are done with
167          * softirq processing:
168          */
169         preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
170
171         if (unlikely(!in_interrupt() && local_softirq_pending())) {
172                 /*
173                  * Run softirq if any pending. And do it in its own stack
174                  * as we may be calling this deep in a task call stack already.
175                  */
176                 do_softirq();
177         }
178
179         preempt_count_dec();
180 #ifdef CONFIG_TRACE_IRQFLAGS
181         local_irq_enable();
182 #endif
183         preempt_check_resched();
184 }
185
186 void local_bh_enable(void)
187 {
188         _local_bh_enable_ip(_RET_IP_);
189 }
190 EXPORT_SYMBOL(local_bh_enable);
191
192 void local_bh_enable_ip(unsigned long ip)
193 {
194         _local_bh_enable_ip(ip);
195 }
196 EXPORT_SYMBOL(local_bh_enable_ip);
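/*
 * Typical usage sketch, with hypothetical data (example_lock, example_queue)
 * that is not part of this file: disabling bhs keeps softirqs and tasklets
 * off this CPU while task context touches data it shares with them; a
 * spinlock is still required to serialize against other CPUs.
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *	static LIST_HEAD(example_queue);
 *
 *	static void example_enqueue(struct list_head *entry)
 *	{
 *		local_bh_disable();		// no softirq preemption here
 *		spin_lock(&example_lock);	// serialize vs. other CPUs
 *		list_add_tail(entry, &example_queue);
 *		spin_unlock(&example_lock);
 *		local_bh_enable();		// runs pending softirqs, if any
 *	}
 *
 * spin_lock_bh()/spin_unlock_bh() combine the two steps and are what most
 * code actually uses.
 */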
197
198 /*
199  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
200  * but break the loop if need_resched() is set or after 2 ms.
201  * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
202  * certain cases, such as stop_machine(), jiffies may cease to
203  * increment and so we need the MAX_SOFTIRQ_RESTART limit as
204  * well to make sure we eventually return from this method.
205  *
206  * These limits have been established via experimentation.
207  * The two things to balance are latency and fairness -
208  * we want to handle softirqs as soon as possible, but they
209  * should not be able to lock up the box.
210  */
211 #define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
212 #define MAX_SOFTIRQ_RESTART 10
213
214 asmlinkage void __do_softirq(void)
215 {
216         struct softirq_action *h;
217         __u32 pending;
218         unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
219         int cpu;
220         unsigned long old_flags = current->flags;
221         int max_restart = MAX_SOFTIRQ_RESTART;
222
223         /*
224          * Mask out PF_MEMALLOC, as the current task context is borrowed for
225          * the softirq. A softirq handler such as network RX might set
226          * PF_MEMALLOC again if the socket is related to swap.
227          */
228         current->flags &= ~PF_MEMALLOC;
229
230         pending = local_softirq_pending();
231         account_irq_enter_time(current);
232
233         __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
234         lockdep_softirq_enter();
235
236         cpu = smp_processor_id();
237 restart:
238         /* Reset the pending bitmask before enabling irqs */
239         set_softirq_pending(0);
240
241         local_irq_enable();
242
243         h = softirq_vec;
244
245         do {
246                 if (pending & 1) {
247                         unsigned int vec_nr = h - softirq_vec;
248                         int prev_count = preempt_count();
249
250                         kstat_incr_softirqs_this_cpu(vec_nr);
251
252                         trace_softirq_entry(vec_nr);
253                         h->action(h);
254                         trace_softirq_exit(vec_nr);
255                         if (unlikely(prev_count != preempt_count())) {
256                                 printk(KERN_ERR "huh, entered softirq %u %s %p "
257                                        "with preempt_count %08x,"
258                                        " exited with %08x?\n", vec_nr,
259                                        softirq_to_name[vec_nr], h->action,
260                                        prev_count, preempt_count());
261                                 preempt_count_set(prev_count);
262                         }
263
264                         rcu_bh_qs(cpu);
265                 }
266                 h++;
267                 pending >>= 1;
268         } while (pending);
269
270         local_irq_disable();
271
272         pending = local_softirq_pending();
273         if (pending) {
274                 if (time_before(jiffies, end) && !need_resched() &&
275                     --max_restart)
276                         goto restart;
277
278                 wakeup_softirqd();
279         }
280
281         lockdep_softirq_exit();
282
283         account_irq_exit_time(current);
284         __local_bh_enable(SOFTIRQ_OFFSET);
285         WARN_ON_ONCE(in_interrupt());
286         tsk_restore_flags(current, old_flags, PF_MEMALLOC);
287 }
288
289
290
291 asmlinkage void do_softirq(void)
292 {
293         __u32 pending;
294         unsigned long flags;
295
296         if (in_interrupt())
297                 return;
298
299         local_irq_save(flags);
300
301         pending = local_softirq_pending();
302
303         if (pending)
304                 do_softirq_own_stack();
305
306         local_irq_restore(flags);
307 }
308
309 /*
310  * Enter an interrupt context.
311  */
312 void irq_enter(void)
313 {
314         rcu_irq_enter();
315         if (is_idle_task(current) && !in_interrupt()) {
316                 /*
317                  * Prevent raise_softirq from needlessly waking up ksoftirqd
318                  * here, as softirq will be serviced on return from interrupt.
319                  */
320                 local_bh_disable();
321                 tick_check_idle();
322                 _local_bh_enable();
323         }
324
325         __irq_enter();
326 }
327
328 static inline void invoke_softirq(void)
329 {
330         if (!force_irqthreads) {
331 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
332                 /*
333                  * We can safely execute softirq on the current stack if
334                  * it is the irq stack, because it should be near empty
335                  * at this stage.
336                  */
337                 __do_softirq();
338 #else
339                 /*
340                  * Otherwise, irq_exit() is called on the task stack, which may
341                  * already be deep. So run the softirq on its own stack
342                  * to prevent any overrun.
343                  */
344                 do_softirq_own_stack();
345 #endif
346         } else {
347                 wakeup_softirqd();
348         }
349 }
350
351 static inline void tick_irq_exit(void)
352 {
353 #ifdef CONFIG_NO_HZ_COMMON
354         int cpu = smp_processor_id();
355
356         /* Make sure that timer wheel updates are propagated */
357         if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
358                 if (!in_interrupt())
359                         tick_nohz_irq_exit();
360         }
361 #endif
362 }
363
364 /*
365  * Exit an interrupt context. Process softirqs if needed and possible:
366  */
367 void irq_exit(void)
368 {
369 #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
370         local_irq_disable();
371 #else
372         WARN_ON_ONCE(!irqs_disabled());
373 #endif
374
375         account_irq_exit_time(current);
376         trace_hardirq_exit();
377         preempt_count_sub(HARDIRQ_OFFSET);
378         if (!in_interrupt() && local_softirq_pending())
379                 invoke_softirq();
380
381         tick_irq_exit();
382         rcu_irq_exit();
383 }
384
385 /*
386  * This function must run with irqs disabled!
387  */
388 inline void raise_softirq_irqoff(unsigned int nr)
389 {
390         __raise_softirq_irqoff(nr);
391
392         /*
393          * If we're in an interrupt or softirq, we're done
394          * (this also catches softirq-disabled code). We will
395          * actually run the softirq once we return from
396          * the irq or softirq.
397          *
398          * Otherwise we wake up ksoftirqd to make sure we
399          * schedule the softirq soon.
400          */
401         if (!in_interrupt())
402                 wakeup_softirqd();
403 }
404
405 void raise_softirq(unsigned int nr)
406 {
407         unsigned long flags;
408
409         local_irq_save(flags);
410         raise_softirq_irqoff(nr);
411         local_irq_restore(flags);
412 }
413
414 void __raise_softirq_irqoff(unsigned int nr)
415 {
416         trace_softirq_raise(nr);
417         or_softirq_pending(1UL << nr);
418 }
419
420 void open_softirq(int nr, void (*action)(struct softirq_action *))
421 {
422         softirq_vec[nr].action = action;
423 }
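/*
 * Registration sketch: softirq numbers come from the fixed enum in
 * <linux/interrupt.h> (HI_SOFTIRQ ... RCU_SOFTIRQ, see softirq_to_name above);
 * EXAMPLE_SOFTIRQ below is a hypothetical entry used only for illustration.
 *
 *	// once, at boot or subsystem init time:
 *	open_softirq(EXAMPLE_SOFTIRQ, example_action);
 *
 *	// later, typically from a hard interrupt handler:
 *	__raise_softirq_irqoff(EXAMPLE_SOFTIRQ);	// irqs already off
 *	// ... or from any context:
 *	raise_softirq(EXAMPLE_SOFTIRQ);
 *
 * The handler then runs on the raising CPU with interrupts enabled, either on
 * irq exit or in ksoftirqd.
 */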
424
425 /*
426  * Tasklets
427  */
428 struct tasklet_head
429 {
430         struct tasklet_struct *head;
431         struct tasklet_struct **tail;
432 };
433
434 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
435 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
436
437 void __tasklet_schedule(struct tasklet_struct *t)
438 {
439         unsigned long flags;
440
441         local_irq_save(flags);
442         t->next = NULL;
443         *__this_cpu_read(tasklet_vec.tail) = t;
444         __this_cpu_write(tasklet_vec.tail, &(t->next));
445         raise_softirq_irqoff(TASKLET_SOFTIRQ);
446         local_irq_restore(flags);
447 }
448
449 EXPORT_SYMBOL(__tasklet_schedule);
450
451 void __tasklet_hi_schedule(struct tasklet_struct *t)
452 {
453         unsigned long flags;
454
455         local_irq_save(flags);
456         t->next = NULL;
457         *__this_cpu_read(tasklet_hi_vec.tail) = t;
458         __this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
459         raise_softirq_irqoff(HI_SOFTIRQ);
460         local_irq_restore(flags);
461 }
462
463 EXPORT_SYMBOL(__tasklet_hi_schedule);
464
465 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
466 {
467         BUG_ON(!irqs_disabled());
468
469         t->next = __this_cpu_read(tasklet_hi_vec.head);
470         __this_cpu_write(tasklet_hi_vec.head, t);
471         __raise_softirq_irqoff(HI_SOFTIRQ);
472 }
473
474 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
475
476 static void tasklet_action(struct softirq_action *a)
477 {
478         struct tasklet_struct *list;
479
480         local_irq_disable();
481         list = __this_cpu_read(tasklet_vec.head);
482         __this_cpu_write(tasklet_vec.head, NULL);
483         __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
484         local_irq_enable();
485
486         while (list) {
487                 struct tasklet_struct *t = list;
488
489                 list = list->next;
490
491                 if (tasklet_trylock(t)) {
492                         if (!atomic_read(&t->count)) {
493                                 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
494                                         BUG();
495                                 t->func(t->data);
496                                 tasklet_unlock(t);
497                                 continue;
498                         }
499                         tasklet_unlock(t);
500                 }
501
502                 local_irq_disable();
503                 t->next = NULL;
504                 *__this_cpu_read(tasklet_vec.tail) = t;
505                 __this_cpu_write(tasklet_vec.tail, &(t->next));
506                 __raise_softirq_irqoff(TASKLET_SOFTIRQ);
507                 local_irq_enable();
508         }
509 }
510
511 static void tasklet_hi_action(struct softirq_action *a)
512 {
513         struct tasklet_struct *list;
514
515         local_irq_disable();
516         list = __this_cpu_read(tasklet_hi_vec.head);
517         __this_cpu_write(tasklet_hi_vec.head, NULL);
518         __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
519         local_irq_enable();
520
521         while (list) {
522                 struct tasklet_struct *t = list;
523
524                 list = list->next;
525
526                 if (tasklet_trylock(t)) {
527                         if (!atomic_read(&t->count)) {
528                                 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
529                                         BUG();
530                                 t->func(t->data);
531                                 tasklet_unlock(t);
532                                 continue;
533                         }
534                         tasklet_unlock(t);
535                 }
536
537                 local_irq_disable();
538                 t->next = NULL;
539                 *__this_cpu_read(tasklet_hi_vec.tail) = t;
540                 __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
541                 __raise_softirq_irqoff(HI_SOFTIRQ);
542                 local_irq_enable();
543         }
544 }
545
546
547 void tasklet_init(struct tasklet_struct *t,
548                   void (*func)(unsigned long), unsigned long data)
549 {
550         t->next = NULL;
551         t->state = 0;
552         atomic_set(&t->count, 0);
553         t->func = func;
554         t->data = data;
555 }
556
557 EXPORT_SYMBOL(tasklet_init);
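/*
 * Usage sketch with a hypothetical driver (example_dev, example_tasklet_fn,
 * none of which exist in this file): a tasklet is initialized once, scheduled
 * from the hard-irq handler, and killed on teardown.
 *
 *	static void example_tasklet_fn(unsigned long data)
 *	{
 *		struct example_dev *dev = (struct example_dev *)data;
 *
 *		// bottom-half work; softirq context on the scheduling CPU
 *	}
 *
 *	tasklet_init(&dev->tasklet, example_tasklet_fn, (unsigned long)dev);
 *
 *	// from the interrupt handler:
 *	tasklet_schedule(&dev->tasklet);
 *
 *	// on device removal, after the irq is disabled:
 *	tasklet_kill(&dev->tasklet);
 *
 * DECLARE_TASKLET() can be used instead of tasklet_init() for statically
 * allocated tasklets.
 */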
558
559 void tasklet_kill(struct tasklet_struct *t)
560 {
561         if (in_interrupt())
562                 printk("Attempt to kill tasklet from interrupt\n");
563
564         while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
565                 do {
566                         yield();
567                 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
568         }
569         tasklet_unlock_wait(t);
570         clear_bit(TASKLET_STATE_SCHED, &t->state);
571 }
572
573 EXPORT_SYMBOL(tasklet_kill);
574
575 /*
576  * tasklet_hrtimer
577  */
578
579 /*
580  * The trampoline is called when the hrtimer expires. It schedules a tasklet
581  * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
582  * hrtimer callback, but from softirq context.
583  */
584 static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
585 {
586         struct tasklet_hrtimer *ttimer =
587                 container_of(timer, struct tasklet_hrtimer, timer);
588
589         tasklet_hi_schedule(&ttimer->tasklet);
590         return HRTIMER_NORESTART;
591 }
592
593 /*
594  * Helper function which calls the hrtimer callback from
595  * tasklet/softirq context
596  */
597 static void __tasklet_hrtimer_trampoline(unsigned long data)
598 {
599         struct tasklet_hrtimer *ttimer = (void *)data;
600         enum hrtimer_restart restart;
601
602         restart = ttimer->function(&ttimer->timer);
603         if (restart != HRTIMER_NORESTART)
604                 hrtimer_restart(&ttimer->timer);
605 }
606
607 /**
608  * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
609  * @ttimer:      tasklet_hrtimer which is initialized
610  * @function:    hrtimer callback function which gets called from softirq context
611  * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
612  * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
613  */
614 void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
615                           enum hrtimer_restart (*function)(struct hrtimer *),
616                           clockid_t which_clock, enum hrtimer_mode mode)
617 {
618         hrtimer_init(&ttimer->timer, which_clock, mode);
619         ttimer->timer.function = __hrtimer_tasklet_trampoline;
620         tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
621                      (unsigned long)ttimer);
622         ttimer->function = function;
623 }
624 EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
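/*
 * Usage sketch (hypothetical callback and timer names): the callback runs in
 * softirq context; a periodic callback would forward the expiry (e.g. with
 * hrtimer_forward_now()) and return HRTIMER_RESTART so the trampoline above
 * re-arms the timer.
 *
 *	static enum hrtimer_restart example_cb(struct hrtimer *timer)
 *	{
 *		// runs from the HI_SOFTIRQ tasklet, not in hard-irq context
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&ttimer, example_cb,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&ttimer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */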
625
626 void __init softirq_init(void)
627 {
628         int cpu;
629
630         for_each_possible_cpu(cpu) {
631                 per_cpu(tasklet_vec, cpu).tail =
632                         &per_cpu(tasklet_vec, cpu).head;
633                 per_cpu(tasklet_hi_vec, cpu).tail =
634                         &per_cpu(tasklet_hi_vec, cpu).head;
635         }
636
637         open_softirq(TASKLET_SOFTIRQ, tasklet_action);
638         open_softirq(HI_SOFTIRQ, tasklet_hi_action);
639 }
640
641 static int ksoftirqd_should_run(unsigned int cpu)
642 {
643         return local_softirq_pending();
644 }
645
646 static void run_ksoftirqd(unsigned int cpu)
647 {
648         local_irq_disable();
649         if (local_softirq_pending()) {
650                 /*
651                  * We can safely run the softirq inline on the current stack,
652                  * as we are not deep in the task stack here.
653                  */
654                 __do_softirq();
655                 rcu_note_context_switch(cpu);
656                 local_irq_enable();
657                 cond_resched();
658                 return;
659         }
660         local_irq_enable();
661 }
662
663 #ifdef CONFIG_HOTPLUG_CPU
664 /*
665  * tasklet_kill_immediate is called to remove a tasklet which can already be
666  * scheduled for execution on @cpu.
667  *
668  * Unlike tasklet_kill, this function removes the tasklet
669  * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
670  *
671  * When this function is called, @cpu must be in the CPU_DEAD state.
672  */
673 void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
674 {
675         struct tasklet_struct **i;
676
677         BUG_ON(cpu_online(cpu));
678         BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
679
680         if (!test_bit(TASKLET_STATE_SCHED, &t->state))
681                 return;
682
683         /* CPU is dead, so no lock needed. */
684         for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
685                 if (*i == t) {
686                         *i = t->next;
687                         /* If this was the tail element, move the tail ptr */
688                         if (*i == NULL)
689                                 per_cpu(tasklet_vec, cpu).tail = i;
690                         return;
691                 }
692         }
693         BUG();
694 }
695
696 static void takeover_tasklets(unsigned int cpu)
697 {
698         /* CPU is dead, so no lock needed. */
699         local_irq_disable();
700
701         /* Find end, append list for that CPU. */
702         if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
703                 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
704                 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
705                 per_cpu(tasklet_vec, cpu).head = NULL;
706                 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
707         }
708         raise_softirq_irqoff(TASKLET_SOFTIRQ);
709
710         if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
711                 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
712                 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
713                 per_cpu(tasklet_hi_vec, cpu).head = NULL;
714                 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
715         }
716         raise_softirq_irqoff(HI_SOFTIRQ);
717
718         local_irq_enable();
719 }
720 #endif /* CONFIG_HOTPLUG_CPU */
721
722 static int cpu_callback(struct notifier_block *nfb,
723                                   unsigned long action,
724                                   void *hcpu)
725 {
726         switch (action) {
727 #ifdef CONFIG_HOTPLUG_CPU
728         case CPU_DEAD:
729         case CPU_DEAD_FROZEN:
730                 takeover_tasklets((unsigned long)hcpu);
731                 break;
732 #endif /* CONFIG_HOTPLUG_CPU */
733         }
734         return NOTIFY_OK;
735 }
736
737 static struct notifier_block cpu_nfb = {
738         .notifier_call = cpu_callback
739 };
740
741 static struct smp_hotplug_thread softirq_threads = {
742         .store                  = &ksoftirqd,
743         .thread_should_run      = ksoftirqd_should_run,
744         .thread_fn              = run_ksoftirqd,
745         .thread_comm            = "ksoftirqd/%u",
746 };
747
748 static __init int spawn_ksoftirqd(void)
749 {
750         register_cpu_notifier(&cpu_nfb);
751
752         BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
753
754         return 0;
755 }
756 early_initcall(spawn_ksoftirqd);
757
758 /*
759  * [ These __weak aliases are kept in a separate compilation unit, so that
760  *   GCC does not inline them incorrectly. ]
761  */
762
763 int __init __weak early_irq_init(void)
764 {
765         return 0;
766 }
767
768 int __init __weak arch_probe_nr_irqs(void)
769 {
770         return NR_IRQS_LEGACY;
771 }
772
773 int __init __weak arch_early_irq_init(void)
774 {
775         return 0;
776 }