lockdep: Correctly annotate hardirq context in irq_exit()
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d7d498d..eb0acf4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,7 +29,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
 
-#include <asm/irq.h>
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -100,13 +99,13 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 
        raw_local_irq_save(flags);
        /*
-        * The preempt tracer hooks into add_preempt_count and will break
+        * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
-       preempt_count() += cnt;
+       __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
@@ -120,7 +119,7 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 #else /* !CONFIG_TRACE_IRQFLAGS */
 static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
-       add_preempt_count(cnt);
+       preempt_count_add(cnt);
        barrier();
 }
 #endif /* CONFIG_TRACE_IRQFLAGS */
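
The comment above hinges on the difference between the traced and the raw
counter helpers. As a rough sketch of the relationship (eliding the
CONFIG_DEBUG_PREEMPT sanity checks, so not the exact kernel definitions):

    /* Raw update: touches only the preempt counter itself. */
    static __always_inline void __preempt_count_add(int val)
    {
            *preempt_count_ptr() += val;
    }

    /* Traced update: additionally reports to the preempt-off tracer,
     * which is exactly the callback that would re-enter lockdep too
     * early in __local_bh_disable(). */
    void preempt_count_add(int val)
    {
            __preempt_count_add(val);
            if (preempt_count() == val)
                    trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
    }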
@@ -134,12 +133,11 @@ EXPORT_SYMBOL(local_bh_disable);
 
 static void __local_bh_enable(unsigned int cnt)
 {
-       WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());
 
        if (softirq_count() == cnt)
                trace_softirqs_on(_RET_IP_);
-       sub_preempt_count(cnt);
+       preempt_count_sub(cnt);
 }
 
 /*
@@ -149,6 +147,7 @@ static void __local_bh_enable(unsigned int cnt)
  */
 void _local_bh_enable(void)
 {
+       WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
 
@@ -169,12 +168,17 @@ static inline void _local_bh_enable_ip(unsigned long ip)
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
-       sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
+       preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
 
-       if (unlikely(!in_interrupt() && local_softirq_pending()))
+       if (unlikely(!in_interrupt() && local_softirq_pending())) {
+               /*
+                * Run softirqs if any are pending, and do so on a separate
+                * stack, as we may already be deep in a task's call stack.
+                */
                do_softirq();
+       }
 
-       dec_preempt_count();
+       preempt_count_dec();
 #ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
 #endif
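
For reference, the two-step decrement above works out as follows, assuming the
usual preempt_count layout (SOFTIRQ_SHIFT = 8, so SOFTIRQ_OFFSET = 0x100 and
SOFTIRQ_DISABLE_OFFSET = 2 * SOFTIRQ_OFFSET = 0x200) and that no other counts
are held:

    /* after local_bh_disable():   preempt_count() == 0x200 */
    preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
    /* preempt_count() == 0x001: softirq_count() is zero, so
     * do_softirq() may run, yet preemption stays disabled   */
    preempt_count_dec();
    /* preempt_count() == 0x000: fully preemptible again     */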
@@ -209,14 +213,52 @@ EXPORT_SYMBOL(local_bh_enable_ip);
 #define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
 #define MAX_SOFTIRQ_RESTART 10
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * Convoluted means of passing __do_softirq() a message through the various
+ * architecture execute_on_stack() bits.
+ *
+ * When we run softirqs from irq_exit(), and thus on the hardirq stack, we
+ * need to keep the lockdep irq context tracking as tight as possible in
+ * order to not mis-qualify lock contexts and miss possible deadlocks.
+ */
+static DEFINE_PER_CPU(int, softirq_from_hardirq);
+
+static inline void lockdep_softirq_from_hardirq(void)
+{
+       this_cpu_write(softirq_from_hardirq, 1);
+}
+
+static inline void lockdep_softirq_start(void)
+{
+       if (this_cpu_read(softirq_from_hardirq))
+               trace_hardirq_exit();
+       lockdep_softirq_enter();
+}
+
+static inline void lockdep_softirq_end(void)
+{
+       lockdep_softirq_exit();
+       if (this_cpu_read(softirq_from_hardirq)) {
+               this_cpu_write(softirq_from_hardirq, 0);
+               trace_hardirq_enter();
+       }
+}
+
+#else
+static inline void lockdep_softirq_from_hardirq(void) { }
+static inline void lockdep_softirq_start(void) { }
+static inline void lockdep_softirq_end(void) { }
+#endif
+
 asmlinkage void __do_softirq(void)
 {
-       struct softirq_action *h;
-       __u32 pending;
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
-       int cpu;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
+       struct softirq_action *h;
+       __u32 pending;
+       int cpu;
 
        /*
         * Mask out PF_MEMALLOC as current task context is borrowed for the
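
Taken together, the per-CPU flag and the start/end helpers above give the
following annotation sequence when pending softirqs are processed on return
from a hardirq (an illustrative trace of the intended ordering, not literal
code):

    /*
     * irq_enter()
     *   trace_hardirq_enter()            -> lockdep: enter hardirq context
     * ... hardirq handler runs ...
     * irq_exit()
     *   invoke_softirq()
     *     lockdep_softirq_from_hardirq() -> softirq_from_hardirq = 1
     *     __do_softirq()
     *       lockdep_softirq_start()
     *         trace_hardirq_exit()       -> leave hardirq context
     *         lockdep_softirq_enter()    -> enter softirq context
     *       ... softirq handlers run ...
     *       lockdep_softirq_end()
     *         lockdep_softirq_exit()     -> leave softirq context
     *         trace_hardirq_enter()      -> back to hardirq context
     *   trace_hardirq_exit()             -> leave hardirq context for good
     */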
@@ -229,7 +271,7 @@ asmlinkage void __do_softirq(void)
        account_irq_enter_time(current);
 
        __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
-       lockdep_softirq_enter();
+       lockdep_softirq_start();
 
        cpu = smp_processor_id();
 restart:
@@ -256,7 +298,7 @@ restart:
                                       " exited with %08x?\n", vec_nr,
                                       softirq_to_name[vec_nr], h->action,
                                       prev_count, preempt_count());
-                               preempt_count() = prev_count;
+                               preempt_count_set(prev_count);
                        }
 
                        rcu_bh_qs(cpu);
@@ -276,15 +318,13 @@ restart:
                wakeup_softirqd();
        }
 
-       lockdep_softirq_exit();
-
+       lockdep_softirq_end();
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
+       WARN_ON_ONCE(in_interrupt());
        tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-#ifndef __ARCH_HAS_DO_SOFTIRQ
-
 asmlinkage void do_softirq(void)
 {
        __u32 pending;
@@ -298,13 +338,11 @@ asmlinkage void do_softirq(void)
        pending = local_softirq_pending();
 
        if (pending)
-               __do_softirq();
+               do_softirq_own_stack();
 
        local_irq_restore(flags);
 }
 
-#endif
-
 /*
  * Enter an interrupt context.
  */
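
do_softirq() now defers to do_softirq_own_stack(), which the architecture
provides. A minimal sketch of the idea only: the call_on_irq_stack() helper
below is hypothetical, standing in for whatever stack-switching primitive the
architecture uses (the real implementations live in arch code and are
typically assembly):

    /* Hypothetical sketch: run __do_softirq() on the per-CPU irq
     * stack instead of the current (possibly deep) task stack. */
    void do_softirq_own_stack(void)
    {
            call_on_irq_stack(__do_softirq);   /* assumed arch helper */
    }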
@@ -329,15 +367,22 @@ void irq_enter(void)
 static inline void invoke_softirq(void)
 {
        if (!force_irqthreads) {
+               lockdep_softirq_from_hardirq();
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
-                * at this stage. But we have no way to know if the arch
-                * calls irq_exit() on the irq stack. So call softirq
-                * in its own stack to prevent from any overrun on top
-                * of a potentially deep task stack.
+                * at this stage.
                 */
-               do_softirq();
+               __do_softirq();
+#else
+               /*
+                * Otherwise, irq_exit() is called on the task stack, which
+                * may already be deep. So run softirqs on their own stack
+                * to prevent any overrun.
+                */
+               do_softirq_own_stack();
+#endif
        } else {
                wakeup_softirqd();
        }
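
In the force_irqthreads case the work is instead deferred to the per-CPU
ksoftirqd thread. For reference, wakeup_softirqd() as it appears earlier in
kernel/softirq.c around this time:

    static void wakeup_softirqd(void)
    {
            /* Interrupts are disabled: no need to stop preemption */
            struct task_struct *tsk = __this_cpu_read(ksoftirqd);

            if (tsk && tsk->state != TASK_RUNNING)
                    wake_up_process(tsk);
    }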
@@ -368,13 +413,13 @@ void irq_exit(void)
 #endif
 
        account_irq_exit_time(current);
-       trace_hardirq_exit();
-       sub_preempt_count(HARDIRQ_OFFSET);
+       preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
 
        tick_irq_exit();
        rcu_irq_exit();
+       trace_hardirq_exit(); /* must be last! */
 }
 
 /*
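
The resulting ordering in irq_exit() is deliberate: the hardirq preempt count
is dropped first so in_interrupt() lets invoke_softirq() run, while the
lockdep annotation stays in place until the very end so that tick_irq_exit()
and rcu_irq_exit() are still tracked as hardirq context. Schematically:

    /*
     * preempt_count_sub(HARDIRQ_OFFSET); -> in_irq() now false; pending
     *                                       softirqs may be invoked
     * invoke_softirq();                  -> may flip lockdep to softirq
     *                                       context and back (see above)
     * tick_irq_exit();
     * rcu_irq_exit();                    -> still annotated as hardirq
     * trace_hardirq_exit();              -> leave hardirq context last
     */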
@@ -771,6 +816,10 @@ static void run_ksoftirqd(unsigned int cpu)
 {
        local_irq_disable();
        if (local_softirq_pending()) {
+               /*
+                * We can safely run softirqs inline on the current stack,
+                * as we are not deep in the task stack here.
+                */
                __do_softirq();
                rcu_note_context_switch(cpu);
                local_irq_enable();
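
For context, run_ksoftirqd() executes on a fresh per-CPU kernel thread stack,
which is why the inline __do_softirq() call is safe here. It is wired up
through the smpboot infrastructure, as elsewhere in this file:

    static struct smp_hotplug_thread softirq_threads = {
            .store              = &ksoftirqd,
            .thread_should_run  = ksoftirqd_should_run,
            .thread_fn          = run_ksoftirqd,
            .thread_comm        = "ksoftirqd/%u",
    };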