Revert "Bluetooth: Store advertising handle so it can be re-enabled"
[platform/kernel/linux-rpi.git] / kernel / softirq.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *      linux/kernel/softirq.c
4  *
5  *      Copyright (C) 1992 Linus Torvalds
6  *
7  *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
8  */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/export.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/interrupt.h>
15 #include <linux/init.h>
16 #include <linux/local_lock.h>
17 #include <linux/mm.h>
18 #include <linux/notifier.h>
19 #include <linux/percpu.h>
20 #include <linux/cpu.h>
21 #include <linux/freezer.h>
22 #include <linux/kthread.h>
23 #include <linux/rcupdate.h>
24 #include <linux/ftrace.h>
25 #include <linux/smp.h>
26 #include <linux/smpboot.h>
27 #include <linux/tick.h>
28 #include <linux/irq.h>
29 #include <linux/wait_bit.h>
30
31 #include <asm/softirq_stack.h>
32
33 #define CREATE_TRACE_POINTS
34 #include <trace/events/irq.h>
35
36 /*
37    - No shared variables, all the data are CPU local.
38    - If a softirq needs serialization, let it serialize itself
39      by its own spinlocks.
40    - Even if a softirq is serialized, only the local CPU is marked for
41      execution. Hence, we get a sort of weak CPU binding, though it
42      is still not clear whether this will result in better locality
43      or not.
44
45    Examples:
46    - NET RX softirq. It is multithreaded and does not require
47      any global serialization.
48    - NET TX softirq. It kicks software netdevice queues, hence
49      it is logically serialized per device, but this serialization
50      is invisible to common code.
51    - Tasklets: each tasklet is serialized with respect to itself.
52  */
53
54 #ifndef __ARCH_IRQ_STAT
55 DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
56 EXPORT_PER_CPU_SYMBOL(irq_stat);
57 #endif
58
59 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
60
61 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62
63 const char * const softirq_to_name[NR_SOFTIRQS] = {
64         "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
65         "TASKLET", "SCHED", "HRTIMER", "RCU"
66 };
67
68 /*
69  * we cannot loop indefinitely here to avoid userspace starvation,
70  * but we also don't want to introduce a worst case 1/HZ latency
71  * to the pending events, so let the scheduler balance
72  * the softirq load for us.
73  */
74 static void wakeup_softirqd(void)
75 {
76         /* Interrupts are disabled: no need to stop preemption */
77         struct task_struct *tsk = __this_cpu_read(ksoftirqd);
78
79         if (tsk)
80                 wake_up_process(tsk);
81 }
82
83 #ifdef CONFIG_TRACE_IRQFLAGS
84 DEFINE_PER_CPU(int, hardirqs_enabled);
85 DEFINE_PER_CPU(int, hardirq_context);
86 EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
87 EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
88 #endif
89
90 /*
91  * SOFTIRQ_OFFSET usage:
92  *
93  * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
94  * to the per CPU softirq_ctrl::cnt and to task::softirq_disable_cnt.
95  *
96  * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
97  *   processing.
98  *
99  * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
100  *   on local_bh_disable or local_bh_enable.
101  *
102  * This lets us distinguish between whether we are currently processing
103  * softirq and whether we just have bh disabled.
104  */
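/*
 * Illustrative sketch, not part of the original file: how the two offsets
 * show up for a task.  A section that has merely disabled bottom halves
 * carries SOFTIRQ_DISABLE_OFFSET in softirq_count(), while actually running
 * softirq handlers adds SOFTIRQ_OFFSET, which is what in_serving_softirq()
 * tests:
 *
 *	local_bh_disable();		// softirq_count() += SOFTIRQ_DISABLE_OFFSET
 *	WARN_ON(!in_softirq());		// no warning: bottom halves are disabled
 *	WARN_ON(in_serving_softirq());	// no warning: no softirq is being processed
 *	local_bh_enable();		// softirq_count() -= SOFTIRQ_DISABLE_OFFSET
 */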
105 #ifdef CONFIG_PREEMPT_RT
106
107 /*
108  * RT accounts for BH disabled sections in task::softirq_disable_cnt and
109  * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
110  * softirq disabled section to be preempted.
111  *
112  * The per task counter is used for softirq_count(), in_softirq() and
113  * in_serving_softirq() because these counts are only valid when the task
114  * holding softirq_ctrl::lock is running.
115  *
116  * The per CPU counter prevents pointless wakeups of ksoftirqd in case
117  * the task which is in a softirq disabled section is preempted or blocks.
118  */
119 struct softirq_ctrl {
120         local_lock_t    lock;
121         int             cnt;
122 };
123
124 static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
125         .lock   = INIT_LOCAL_LOCK(softirq_ctrl.lock),
126 };
127
128 /**
129  * local_bh_blocked() - Check from the idle task whether BH processing is blocked
130  *
131  * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
132  *
133  * This is invoked from the idle task to guard against false positive
134  * softirq pending warnings, which would happen when the task which holds
135  * softirq_ctrl::lock was the only running task on the CPU and blocks on
136  * some other lock.
137  */
138 bool local_bh_blocked(void)
139 {
140         return __this_cpu_read(softirq_ctrl.cnt) != 0;
141 }
142
143 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
144 {
145         unsigned long flags;
146         int newcnt;
147
148         WARN_ON_ONCE(in_hardirq());
149
150         /* First entry of a task into a BH disabled section? */
151         if (!current->softirq_disable_cnt) {
152                 if (preemptible()) {
153                         local_lock(&softirq_ctrl.lock);
154                         /* Required to meet the RCU bottomhalf requirements. */
155                         rcu_read_lock();
156                 } else {
157                         DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
158                 }
159         }
160
161         /*
162          * Track the per CPU softirq disabled state. On RT this is per CPU
163          * state to allow preemption of bottom half disabled sections.
164          */
165         newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
166         /*
167          * Reflect the result in the task state to prevent recursion on the
168          * local lock and to make softirq_count() et al. work.
169          */
170         current->softirq_disable_cnt = newcnt;
171
172         if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
173                 raw_local_irq_save(flags);
174                 lockdep_softirqs_off(ip);
175                 raw_local_irq_restore(flags);
176         }
177 }
178 EXPORT_SYMBOL(__local_bh_disable_ip);
179
180 static void __local_bh_enable(unsigned int cnt, bool unlock)
181 {
182         unsigned long flags;
183         int newcnt;
184
185         DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
186                             this_cpu_read(softirq_ctrl.cnt));
187
188         if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
189                 raw_local_irq_save(flags);
190                 lockdep_softirqs_on(_RET_IP_);
191                 raw_local_irq_restore(flags);
192         }
193
194         newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
195         current->softirq_disable_cnt = newcnt;
196
197         if (!newcnt && unlock) {
198                 rcu_read_unlock();
199                 local_unlock(&softirq_ctrl.lock);
200         }
201 }
202
203 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
204 {
205         bool preempt_on = preemptible();
206         unsigned long flags;
207         u32 pending;
208         int curcnt;
209
210         WARN_ON_ONCE(in_hardirq());
211         lockdep_assert_irqs_enabled();
212
213         local_irq_save(flags);
214         curcnt = __this_cpu_read(softirq_ctrl.cnt);
215
216         /*
217          * If this is not reenabling soft interrupts, no point in trying to
218          * run pending ones.
219          */
220         if (curcnt != cnt)
221                 goto out;
222
223         pending = local_softirq_pending();
224         if (!pending)
225                 goto out;
226
227         /*
228          * If this was called from a non-preemptible context, wake up the
229          * softirq daemon.
230          */
231         if (!preempt_on) {
232                 wakeup_softirqd();
233                 goto out;
234         }
235
236         /*
237          * Adjust softirq count to SOFTIRQ_OFFSET which makes
238          * in_serving_softirq() become true.
239          */
240         cnt = SOFTIRQ_OFFSET;
241         __local_bh_enable(cnt, false);
242         __do_softirq();
243
244 out:
245         __local_bh_enable(cnt, preempt_on);
246         local_irq_restore(flags);
247 }
248 EXPORT_SYMBOL(__local_bh_enable_ip);
249
250 void softirq_preempt(void)
251 {
252         if (WARN_ON_ONCE(!preemptible()))
253                 return;
254
255         if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
256                 return;
257
258         __local_bh_enable(SOFTIRQ_OFFSET, true);
259         /* preemption point */
260         __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
261 }
262
263 /*
264  * Invoked from run_ksoftirqd() outside of the interrupt disabled section
265  * to acquire the per CPU local lock for reentrancy protection.
266  */
267 static inline void ksoftirqd_run_begin(void)
268 {
269         __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
270         local_irq_disable();
271 }
272
273 /* Counterpart to ksoftirqd_run_begin() */
274 static inline void ksoftirqd_run_end(void)
275 {
276         __local_bh_enable(SOFTIRQ_OFFSET, true);
277         WARN_ON_ONCE(in_interrupt());
278         local_irq_enable();
279 }
280
281 static inline void softirq_handle_begin(void) { }
282 static inline void softirq_handle_end(void) { }
283
284 static inline bool should_wake_ksoftirqd(void)
285 {
286         return !this_cpu_read(softirq_ctrl.cnt);
287 }
288
289 static inline void invoke_softirq(void)
290 {
291         if (should_wake_ksoftirqd())
292                 wakeup_softirqd();
293 }
294
295 /*
296  * flush_smp_call_function_queue() can raise a soft interrupt in a function
297  * call. On RT kernels this is undesired and the only known functionality
298  * in the block layer which does this is disabled on RT. If soft interrupts
299  * get raised which haven't been raised before the flush, warn so it can be
300  * investigated.
301  */
302 void do_softirq_post_smp_call_flush(unsigned int was_pending)
303 {
304         if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
305                 invoke_softirq();
306 }
307
308 #else /* CONFIG_PREEMPT_RT */
309
310 /*
311  * This one is for softirq.c-internal use, where hardirqs are disabled
312  * legitimately:
313  */
314 #ifdef CONFIG_TRACE_IRQFLAGS
315 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
316 {
317         unsigned long flags;
318
319         WARN_ON_ONCE(in_hardirq());
320
321         raw_local_irq_save(flags);
322         /*
323          * The preempt tracer hooks into preempt_count_add and will break
324          * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
325          * is set and before current->softirq_enabled is cleared.
326          * We must manually increment preempt_count here and manually
327          * call trace_preempt_off() later.
328          */
329         __preempt_count_add(cnt);
330         /*
331          * Were softirqs turned off above:
332          */
333         if (softirq_count() == (cnt & SOFTIRQ_MASK))
334                 lockdep_softirqs_off(ip);
335         raw_local_irq_restore(flags);
336
337         if (preempt_count() == cnt) {
338 #ifdef CONFIG_DEBUG_PREEMPT
339                 current->preempt_disable_ip = get_lock_parent_ip();
340 #endif
341                 trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
342         }
343 }
344 EXPORT_SYMBOL(__local_bh_disable_ip);
345 #endif /* CONFIG_TRACE_IRQFLAGS */
346
347 static void __local_bh_enable(unsigned int cnt)
348 {
349         lockdep_assert_irqs_disabled();
350
351         if (preempt_count() == cnt)
352                 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
353
354         if (softirq_count() == (cnt & SOFTIRQ_MASK))
355                 lockdep_softirqs_on(_RET_IP_);
356
357         __preempt_count_sub(cnt);
358 }
359
360 /*
361  * Special-case - softirqs can safely be enabled by __do_softirq(),
362  * without processing still-pending softirqs:
363  */
364 void _local_bh_enable(void)
365 {
366         WARN_ON_ONCE(in_hardirq());
367         __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
368 }
369 EXPORT_SYMBOL(_local_bh_enable);
370
371 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
372 {
373         WARN_ON_ONCE(in_hardirq());
374         lockdep_assert_irqs_enabled();
375 #ifdef CONFIG_TRACE_IRQFLAGS
376         local_irq_disable();
377 #endif
378         /*
379          * Are softirqs going to be turned on now:
380          */
381         if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
382                 lockdep_softirqs_on(ip);
383         /*
384          * Keep preemption disabled until we are done with
385          * softirq processing:
386          */
387         __preempt_count_sub(cnt - 1);
388
389         if (unlikely(!in_interrupt() && local_softirq_pending())) {
390                 /*
391                  * Run softirqs if any are pending, and do it on a separate stack
392                  * as we may be calling this deep in a task call stack already.
393                  */
394                 do_softirq();
395         }
396
397         preempt_count_dec();
398 #ifdef CONFIG_TRACE_IRQFLAGS
399         local_irq_enable();
400 #endif
401         preempt_check_resched();
402 }
403 EXPORT_SYMBOL(__local_bh_enable_ip);
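/*
 * Usage sketch, illustrative only and not part of the original file: a
 * typical caller pairing local_bh_disable()/local_bh_enable() (which end up
 * in __local_bh_disable_ip()/__local_bh_enable_ip()) to protect per CPU data
 * that is also touched from a softirq handler.  The my_stats counter is a
 * hypothetical example.
 *
 *	local_bh_disable();		// no softirq can run on this CPU now
 *	__this_cpu_inc(my_stats.packets);
 *	local_bh_enable();		// may process pending softirqs on return
 */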
404
405 static inline void softirq_handle_begin(void)
406 {
407         __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
408 }
409
410 static inline void softirq_handle_end(void)
411 {
412         __local_bh_enable(SOFTIRQ_OFFSET);
413         WARN_ON_ONCE(in_interrupt());
414 }
415
416 static inline void ksoftirqd_run_begin(void)
417 {
418         local_irq_disable();
419 }
420
421 static inline void ksoftirqd_run_end(void)
422 {
423         local_irq_enable();
424 }
425
426 static inline bool should_wake_ksoftirqd(void)
427 {
428         return true;
429 }
430
431 static inline void invoke_softirq(void)
432 {
433         if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
434 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
435                 /*
436                  * We can safely execute softirq on the current stack if
437                  * it is the irq stack, because it should be near empty
438                  * at this stage.
439                  */
440                 __do_softirq();
441 #else
442                 /*
443                  * Otherwise, irq_exit() is called on the task stack, which can
444                  * already be quite deep. So run softirqs on their own stack
445                  * to prevent any overrun.
446                  */
447                 do_softirq_own_stack();
448 #endif
449         } else {
450                 wakeup_softirqd();
451         }
452 }
453
454 asmlinkage __visible void do_softirq(void)
455 {
456         __u32 pending;
457         unsigned long flags;
458
459         if (in_interrupt())
460                 return;
461
462         local_irq_save(flags);
463
464         pending = local_softirq_pending();
465
466         if (pending)
467                 do_softirq_own_stack();
468
469         local_irq_restore(flags);
470 }
471
472 #endif /* !CONFIG_PREEMPT_RT */
473
474 /*
475  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
476  * but break the loop if need_resched() is set or after 2 ms.
477  * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
478  * certain cases, such as stop_machine(), jiffies may cease to
479  * increment and so we need the MAX_SOFTIRQ_RESTART limit as
480  * well to make sure we eventually return from this method.
481  *
482  * These limits have been established via experimentation.
483  * The two things to balance are latency and fairness -
484  * we want to handle softirqs as soon as possible, but they
485  * should not be able to lock up the box.
486  */
487 #define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
488 #define MAX_SOFTIRQ_RESTART 10
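/*
 * Worked example (added note, not part of the original file): with HZ=250 a
 * jiffy is 4 ms, so msecs_to_jiffies(2) rounds up to a single jiffy and the
 * __do_softirq() loop stops at whichever comes first: one tick, ten restarts,
 * or need_resched() becoming true.
 */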
489
490 #ifdef CONFIG_TRACE_IRQFLAGS
491 /*
492  * When we run softirqs from irq_exit() and thus on the hardirq stack we need
493  * to keep the lockdep irq context tracking as tight as possible in order to
494  * not mis-qualify lock contexts and miss possible deadlocks.
495  */
496
497 static inline bool lockdep_softirq_start(void)
498 {
499         bool in_hardirq = false;
500
501         if (lockdep_hardirq_context()) {
502                 in_hardirq = true;
503                 lockdep_hardirq_exit();
504         }
505
506         lockdep_softirq_enter();
507
508         return in_hardirq;
509 }
510
511 static inline void lockdep_softirq_end(bool in_hardirq)
512 {
513         lockdep_softirq_exit();
514
515         if (in_hardirq)
516                 lockdep_hardirq_enter();
517 }
518 #else
519 static inline bool lockdep_softirq_start(void) { return false; }
520 static inline void lockdep_softirq_end(bool in_hardirq) { }
521 #endif
522
523 asmlinkage __visible void __softirq_entry __do_softirq(void)
524 {
525         unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
526         unsigned long old_flags = current->flags;
527         int max_restart = MAX_SOFTIRQ_RESTART;
528         struct softirq_action *h;
529         bool in_hardirq;
530         __u32 pending;
531         int softirq_bit;
532
533         /*
534          * Mask out PF_MEMALLOC as the current task context is borrowed for the
535          * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
536          * again if the socket is related to swapping.
537          */
538         current->flags &= ~PF_MEMALLOC;
539
540         pending = local_softirq_pending();
541
542         softirq_handle_begin();
543         in_hardirq = lockdep_softirq_start();
544         account_softirq_enter(current);
545
546 restart:
547         /* Reset the pending bitmask before enabling irqs */
548         set_softirq_pending(0);
549
550         local_irq_enable();
551
552         h = softirq_vec;
553
554         while ((softirq_bit = ffs(pending))) {
555                 unsigned int vec_nr;
556                 int prev_count;
557
558                 h += softirq_bit - 1;
559
560                 vec_nr = h - softirq_vec;
561                 prev_count = preempt_count();
562
563                 kstat_incr_softirqs_this_cpu(vec_nr);
564
565                 trace_softirq_entry(vec_nr);
566                 h->action(h);
567                 trace_softirq_exit(vec_nr);
568                 if (unlikely(prev_count != preempt_count())) {
569                         pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
570                                vec_nr, softirq_to_name[vec_nr], h->action,
571                                prev_count, preempt_count());
572                         preempt_count_set(prev_count);
573                 }
574                 h++;
575                 pending >>= softirq_bit;
576         }
577
578         if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
579             __this_cpu_read(ksoftirqd) == current)
580                 rcu_softirq_qs();
581
582         local_irq_disable();
583
584         pending = local_softirq_pending();
585         if (pending) {
586                 if (time_before(jiffies, end) && !need_resched() &&
587                     --max_restart)
588                         goto restart;
589
590                 wakeup_softirqd();
591         }
592
593         account_softirq_exit(current);
594         lockdep_softirq_end(in_hardirq);
595         softirq_handle_end();
596         current_restore_flags(old_flags, PF_MEMALLOC);
597 }
598
599 /**
600  * irq_enter_rcu - Enter an interrupt context with RCU watching
601  */
602 void irq_enter_rcu(void)
603 {
604         __irq_enter_raw();
605
606         if (tick_nohz_full_cpu(smp_processor_id()) ||
607             (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
608                 tick_irq_enter();
609
610         account_hardirq_enter(current);
611 }
612
613 /**
614  * irq_enter - Enter an interrupt context including RCU update
615  */
616 void irq_enter(void)
617 {
618         ct_irq_enter();
619         irq_enter_rcu();
620 }
621
622 static inline void tick_irq_exit(void)
623 {
624 #ifdef CONFIG_NO_HZ_COMMON
625         int cpu = smp_processor_id();
626
627         /* Make sure that timer wheel updates are propagated */
628         if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
629                 if (!in_hardirq())
630                         tick_nohz_irq_exit();
631         }
632 #endif
633 }
634
635 #ifdef CONFIG_PREEMPT_RT
636 DEFINE_PER_CPU(struct task_struct *, timersd);
637 DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
638
639 static void wake_timersd(void)
640 {
641         struct task_struct *tsk = __this_cpu_read(timersd);
642
643         if (tsk)
644                 wake_up_process(tsk);
645 }
646
647 #else
648
649 static inline void wake_timersd(void) { }
650
651 #endif
652
653 static inline void __irq_exit_rcu(void)
654 {
655 #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
656         local_irq_disable();
657 #else
658         lockdep_assert_irqs_disabled();
659 #endif
660         account_hardirq_exit(current);
661         preempt_count_sub(HARDIRQ_OFFSET);
662         if (!in_interrupt() && local_softirq_pending())
663                 invoke_softirq();
664
665         if (IS_ENABLED(CONFIG_PREEMPT_RT) && local_pending_timers() &&
666             !(in_nmi() | in_hardirq()))
667                 wake_timersd();
668
669         tick_irq_exit();
670 }
671
672 /**
673  * irq_exit_rcu() - Exit an interrupt context without updating RCU
674  *
675  * Also processes softirqs if needed and possible.
676  */
677 void irq_exit_rcu(void)
678 {
679         __irq_exit_rcu();
680          /* must be last! */
681         lockdep_hardirq_exit();
682 }
683
684 /**
685  * irq_exit - Exit an interrupt context, update RCU and lockdep
686  *
687  * Also processes softirqs if needed and possible.
688  */
689 void irq_exit(void)
690 {
691         __irq_exit_rcu();
692         ct_irq_exit();
693          /* must be last! */
694         lockdep_hardirq_exit();
695 }
696
697 /*
698  * This function must run with irqs disabled!
699  */
700 inline void raise_softirq_irqoff(unsigned int nr)
701 {
702         __raise_softirq_irqoff(nr);
703
704         /*
705          * If we're in an interrupt or softirq, we're done
706          * (this also catches softirq-disabled code). We will
707          * actually run the softirq once we return from
708          * the irq or softirq.
709          *
710          * Otherwise we wake up ksoftirqd to make sure we
711          * schedule the softirq soon.
712          */
713         if (!in_interrupt() && should_wake_ksoftirqd())
714                 wakeup_softirqd();
715 }
716
717 void raise_softirq(unsigned int nr)
718 {
719         unsigned long flags;
720
721         local_irq_save(flags);
722         raise_softirq_irqoff(nr);
723         local_irq_restore(flags);
724 }
725
726 void __raise_softirq_irqoff(unsigned int nr)
727 {
728         lockdep_assert_irqs_disabled();
729         trace_softirq_raise(nr);
730         or_softirq_pending(1UL << nr);
731 }
732
733 void open_softirq(int nr, void (*action)(struct softirq_action *))
734 {
735         softirq_vec[nr].action = action;
736 }
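/*
 * Registration sketch, illustrative only and not part of the original file:
 * a subsystem wires up its vector once at boot and raises it later, e.g.
 * from an interrupt handler.  MY_SOFTIRQ and my_softirq_action() are
 * hypothetical names; real vectors are the fixed entries of the softirq enum
 * in <linux/interrupt.h>.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// runs in softirq context with hardirqs enabled
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// init time, once
 *	raise_softirq(MY_SOFTIRQ);			// later, marks it pending
 */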
737
738 /*
739  * Tasklets
740  */
741 struct tasklet_head {
742         struct tasklet_struct *head;
743         struct tasklet_struct **tail;
744 };
745
746 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
747 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
748
749 static void __tasklet_schedule_common(struct tasklet_struct *t,
750                                       struct tasklet_head __percpu *headp,
751                                       unsigned int softirq_nr)
752 {
753         struct tasklet_head *head;
754         unsigned long flags;
755
756         local_irq_save(flags);
757         head = this_cpu_ptr(headp);
758         t->next = NULL;
759         *head->tail = t;
760         head->tail = &(t->next);
761         raise_softirq_irqoff(softirq_nr);
762         local_irq_restore(flags);
763 }
764
765 void __tasklet_schedule(struct tasklet_struct *t)
766 {
767         __tasklet_schedule_common(t, &tasklet_vec,
768                                   TASKLET_SOFTIRQ);
769 }
770 EXPORT_SYMBOL(__tasklet_schedule);
771
772 void __tasklet_hi_schedule(struct tasklet_struct *t)
773 {
774         __tasklet_schedule_common(t, &tasklet_hi_vec,
775                                   HI_SOFTIRQ);
776 }
777 EXPORT_SYMBOL(__tasklet_hi_schedule);
778
779 static bool tasklet_clear_sched(struct tasklet_struct *t)
780 {
781         if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
782                 wake_up_var(&t->state);
783                 return true;
784         }
785
786         WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
787                   t->use_callback ? "callback" : "func",
788                   t->use_callback ? (void *)t->callback : (void *)t->func);
789
790         return false;
791 }
792
793 static void tasklet_action_common(struct softirq_action *a,
794                                   struct tasklet_head *tl_head,
795                                   unsigned int softirq_nr)
796 {
797         struct tasklet_struct *list;
798
799         local_irq_disable();
800         list = tl_head->head;
801         tl_head->head = NULL;
802         tl_head->tail = &tl_head->head;
803         local_irq_enable();
804
805         while (list) {
806                 struct tasklet_struct *t = list;
807
808                 list = list->next;
809
810                 if (tasklet_trylock(t)) {
811                         if (!atomic_read(&t->count)) {
812                                 if (tasklet_clear_sched(t)) {
813                                         if (t->use_callback) {
814                                                 trace_tasklet_entry(t, t->callback);
815                                                 t->callback(t);
816                                                 trace_tasklet_exit(t, t->callback);
817                                         } else {
818                                                 trace_tasklet_entry(t, t->func);
819                                                 t->func(t->data);
820                                                 trace_tasklet_exit(t, t->func);
821                                         }
822                                 }
823                                 tasklet_unlock(t);
824                                 continue;
825                         }
826                         tasklet_unlock(t);
827                 }
828
829                 local_irq_disable();
830                 t->next = NULL;
831                 *tl_head->tail = t;
832                 tl_head->tail = &t->next;
833                 __raise_softirq_irqoff(softirq_nr);
834                 local_irq_enable();
835         }
836 }
837
838 static __latent_entropy void tasklet_action(struct softirq_action *a)
839 {
840         tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
841 }
842
843 static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
844 {
845         tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
846 }
847
848 void tasklet_setup(struct tasklet_struct *t,
849                    void (*callback)(struct tasklet_struct *))
850 {
851         t->next = NULL;
852         t->state = 0;
853         atomic_set(&t->count, 0);
854         t->callback = callback;
855         t->use_callback = true;
856         t->data = 0;
857 }
858 EXPORT_SYMBOL(tasklet_setup);
859
860 void tasklet_init(struct tasklet_struct *t,
861                   void (*func)(unsigned long), unsigned long data)
862 {
863         t->next = NULL;
864         t->state = 0;
865         atomic_set(&t->count, 0);
866         t->func = func;
867         t->use_callback = false;
868         t->data = data;
869 }
870 EXPORT_SYMBOL(tasklet_init);
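/*
 * Usage sketch, illustrative only and not part of the original file: the
 * callback-style API set up via tasklet_setup().  struct my_dev,
 * my_tasklet_fn() and the tasklet member are hypothetical driver names.
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tasklet);
 *		// deferred work for dev runs here in softirq context
 *	}
 *
 *	tasklet_setup(&dev->tasklet, my_tasklet_fn);	// once, e.g. at probe
 *	tasklet_schedule(&dev->tasklet);		// from the IRQ handler
 */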
871
872 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
873 /*
874  * Do not use in new code. Waiting for tasklets from atomic contexts is
875  * error prone and should be avoided.
876  */
877 void tasklet_unlock_spin_wait(struct tasklet_struct *t)
878 {
879         while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
880                 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
881                         /*
882                          * Prevent a live lock when the current task has preempted
883                          * soft interrupt processing or is preventing ksoftirqd from
884                          * running. If the tasklet runs on a different CPU
885                          * then this has no effect other than doing the BH
886                          * disable/enable dance for nothing.
887                          */
888                         local_bh_disable();
889                         local_bh_enable();
890                 } else {
891                         cpu_relax();
892                 }
893         }
894 }
895 EXPORT_SYMBOL(tasklet_unlock_spin_wait);
896 #endif
897
898 void tasklet_kill(struct tasklet_struct *t)
899 {
900         if (in_interrupt())
901                 pr_notice("Attempt to kill tasklet from interrupt\n");
902
903         while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
904                 wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
905
906         tasklet_unlock_wait(t);
907         tasklet_clear_sched(t);
908 }
909 EXPORT_SYMBOL(tasklet_kill);
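/*
 * Teardown sketch, illustrative only and not part of the original file: once
 * nothing can schedule the tasklet any more (here via a hypothetical
 * free_irq() on dev->irq), tasklet_kill() waits for a scheduled or running
 * instance to finish before the containing object is freed.
 *
 *	free_irq(dev->irq, dev);	// no further tasklet_schedule() calls
 *	tasklet_kill(&dev->tasklet);	// wait for SCHED/RUN to clear
 *	kfree(dev);
 */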
910
911 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
912 void tasklet_unlock(struct tasklet_struct *t)
913 {
914         smp_mb__before_atomic();
915         clear_bit(TASKLET_STATE_RUN, &t->state);
916         smp_mb__after_atomic();
917         wake_up_var(&t->state);
918 }
919 EXPORT_SYMBOL_GPL(tasklet_unlock);
920
921 void tasklet_unlock_wait(struct tasklet_struct *t)
922 {
923         wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
924 }
925 EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
926 #endif
927
928 void __init softirq_init(void)
929 {
930         int cpu;
931
932         for_each_possible_cpu(cpu) {
933                 per_cpu(tasklet_vec, cpu).tail =
934                         &per_cpu(tasklet_vec, cpu).head;
935                 per_cpu(tasklet_hi_vec, cpu).tail =
936                         &per_cpu(tasklet_hi_vec, cpu).head;
937         }
938
939         open_softirq(TASKLET_SOFTIRQ, tasklet_action);
940         open_softirq(HI_SOFTIRQ, tasklet_hi_action);
941 }
942
943 static int ksoftirqd_should_run(unsigned int cpu)
944 {
945         return local_softirq_pending();
946 }
947
948 static void run_ksoftirqd(unsigned int cpu)
949 {
950         ksoftirqd_run_begin();
951         if (local_softirq_pending()) {
952                 /*
953                  * We can safely run softirqs on the current stack, as we are not deep
954                  * in the task stack here.
955                  */
956                 __do_softirq();
957                 ksoftirqd_run_end();
958                 cond_resched();
959                 return;
960         }
961         ksoftirqd_run_end();
962 }
963
964 #ifdef CONFIG_HOTPLUG_CPU
965 static int takeover_tasklets(unsigned int cpu)
966 {
967         /* CPU is dead, so no lock needed. */
968         local_irq_disable();
969
970         /* Find end, append list for that CPU. */
971         if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
972                 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
973                 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
974                 per_cpu(tasklet_vec, cpu).head = NULL;
975                 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
976         }
977         raise_softirq_irqoff(TASKLET_SOFTIRQ);
978
979         if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
980                 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
981                 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
982                 per_cpu(tasklet_hi_vec, cpu).head = NULL;
983                 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
984         }
985         raise_softirq_irqoff(HI_SOFTIRQ);
986
987         local_irq_enable();
988         return 0;
989 }
990 #else
991 #define takeover_tasklets       NULL
992 #endif /* CONFIG_HOTPLUG_CPU */
993
994 static struct smp_hotplug_thread softirq_threads = {
995         .store                  = &ksoftirqd,
996         .thread_should_run      = ksoftirqd_should_run,
997         .thread_fn              = run_ksoftirqd,
998         .thread_comm            = "ksoftirqd/%u",
999 };
1000
1001 #ifdef CONFIG_PREEMPT_RT
1002 static void timersd_setup(unsigned int cpu)
1003 {
1004         sched_set_fifo_low(current);
1005 }
1006
1007 static int timersd_should_run(unsigned int cpu)
1008 {
1009         return local_pending_timers();
1010 }
1011
1012 static void run_timersd(unsigned int cpu)
1013 {
1014         unsigned int timer_si;
1015
1016         ksoftirqd_run_begin();
1017
1018         timer_si = local_pending_timers();
1019         __this_cpu_write(pending_timer_softirq, 0);
1020         or_softirq_pending(timer_si);
1021
1022         __do_softirq();
1023
1024         ksoftirqd_run_end();
1025 }
1026
1027 static void raise_ktimers_thread(unsigned int nr)
1028 {
1029         trace_softirq_raise(nr);
1030         __this_cpu_or(pending_timer_softirq, 1 << nr);
1031 }
1032
1033 void raise_hrtimer_softirq(void)
1034 {
1035         raise_ktimers_thread(HRTIMER_SOFTIRQ);
1036 }
1037
1038 void raise_timer_softirq(void)
1039 {
1040         unsigned long flags;
1041
1042         local_irq_save(flags);
1043         raise_ktimers_thread(TIMER_SOFTIRQ);
1044         wake_timersd();
1045         local_irq_restore(flags);
1046 }
1047
1048 static struct smp_hotplug_thread timer_threads = {
1049         .store                  = &timersd,
1050         .setup                  = timersd_setup,
1051         .thread_should_run      = timersd_should_run,
1052         .thread_fn              = run_timersd,
1053         .thread_comm            = "ktimers/%u",
1054 };
1055 #endif
1056
1057 static __init int spawn_ksoftirqd(void)
1058 {
1059         cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
1060                                   takeover_tasklets);
1061         BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
1062 #ifdef CONFIG_PREEMPT_RT
1063         BUG_ON(smpboot_register_percpu_thread(&timer_threads));
1064 #endif
1065         return 0;
1066 }
1067 early_initcall(spawn_ksoftirqd);
1068
1069 /*
1070  * [ These __weak aliases are kept in a separate compilation unit, so that
1071  *   GCC does not inline them incorrectly. ]
1072  */
1073
1074 int __init __weak early_irq_init(void)
1075 {
1076         return 0;
1077 }
1078
1079 int __init __weak arch_probe_nr_irqs(void)
1080 {
1081         return NR_IRQS_LEGACY;
1082 }
1083
1084 int __init __weak arch_early_irq_init(void)
1085 {
1086         return 0;
1087 }
1088
1089 unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
1090 {
1091         return from;
1092 }