locking/lockdep: Improve 'invalid wait context' splat
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Tue, 31 Mar 2020 18:38:12 +0000 (20:38 +0200)
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 8 Apr 2020 10:05:07 +0000 (12:05 +0200)
The 'invalid wait context' splat doesn't print all the information
required to reconstruct / validate the error; specifically, the
irq-context state is missing.
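
With this change the splat's "other info" section gains a line of the
form:

    context-{%d:%d}

where (in this patch) both fields are the wait type of the current
context as computed by the new task_wait_context() helper:
LD_WAIT_SPIN in a non-threaded hardirq, LD_WAIT_CONFIG in a threaded
hardirq or in softirq context, and LD_WAIT_MAX otherwise.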

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1511690..ac10db6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3952,10 +3952,36 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
        return ret;
 }
 
+static inline short task_wait_context(struct task_struct *curr)
+{
+       /*
+        * Set appropriate wait type for the context; for IRQs we have to take
+        * into account force_irqthread as that is implied by PREEMPT_RT.
+        */
+       if (curr->hardirq_context) {
+               /*
+                * Check if force_irqthreads will run us threaded.
+                */
+               if (curr->hardirq_threaded || curr->irq_config)
+                       return LD_WAIT_CONFIG;
+
+               return LD_WAIT_SPIN;
+       } else if (curr->softirq_context) {
+               /*
+                * Softirqs are always threaded.
+                */
+               return LD_WAIT_CONFIG;
+       }
+
+       return LD_WAIT_MAX;
+}
+
 static int
 print_lock_invalid_wait_context(struct task_struct *curr,
                                struct held_lock *hlock)
 {
+       short curr_inner;
+
        if (!debug_locks_off())
                return 0;
        if (debug_locks_silent)
@@ -3971,6 +3997,10 @@ print_lock_invalid_wait_context(struct task_struct *curr,
        print_lock(hlock);
 
        pr_warn("other info that might help us debug this:\n");
+
+       curr_inner = task_wait_context(curr);
+       pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
+
        lockdep_print_held_locks(curr);
 
        pr_warn("stack backtrace:\n");
@@ -4017,26 +4047,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
        }
        depth++;
 
-       /*
-        * Set appropriate wait type for the context; for IRQs we have to take
-        * into account force_irqthread as that is implied by PREEMPT_RT.
-        */
-       if (curr->hardirq_context) {
-               /*
-                * Check if force_irqthreads will run us threaded.
-                */
-               if (curr->hardirq_threaded || curr->irq_config)
-                       curr_inner = LD_WAIT_CONFIG;
-               else
-                       curr_inner = LD_WAIT_SPIN;
-       } else if (curr->softirq_context) {
-               /*
-                * Softirqs are always threaded.
-                */
-               curr_inner = LD_WAIT_CONFIG;
-       } else {
-               curr_inner = LD_WAIT_MAX;
-       }
+       curr_inner = task_wait_context(curr);
 
        for (; depth < curr->lockdep_depth; depth++) {
                struct held_lock *prev = curr->held_locks + depth;
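
For reference, the wait-type values printed by the new context line come
from enum lockdep_wait_type. A rough sketch of that enum (comments
paraphrased; the exact layout depends on the kernel version, and without
CONFIG_PROVE_RAW_LOCK_NESTING, LD_WAIT_CONFIG aliases LD_WAIT_SPIN):

    enum lockdep_wait_type {
            LD_WAIT_INV = 0,        /* not checked, catch-all */
            LD_WAIT_FREE,           /* wait-free, e.g. RCU */
            LD_WAIT_SPIN,           /* spinning locks, raw_spinlock_t */
    #ifdef CONFIG_PROVE_RAW_LOCK_NESTING
            LD_WAIT_CONFIG,         /* spinlock_t, preemptible on PREEMPT_RT */
    #else
            LD_WAIT_CONFIG = LD_WAIT_SPIN,
    #endif
            LD_WAIT_SLEEP,          /* sleeping locks, e.g. mutex */
            LD_WAIT_MAX,            /* must be last */
    };

So, with CONFIG_PROVE_RAW_LOCK_NESTING=y, a splat line such as
"context-{3:3}" roughly means the current context may only acquire locks
whose wait type is LD_WAIT_CONFIG or lower; acquiring e.g. a mutex
(LD_WAIT_SLEEP) from there is what triggers the report.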