lockdep: Use raw_cpu_*() for per-cpu variables
author     Peter Zijlstra <peterz@infradead.org>
           Thu, 20 Aug 2020 07:13:30 +0000 (09:13 +0200)
committer  Peter Zijlstra <peterz@infradead.org>
           Wed, 26 Aug 2020 10:41:53 +0000 (12:41 +0200)
Sven reported that commit a21ee6055c30 ("lockdep: Change
hardirq{s_enabled,_context} to per-cpu variables") caused trouble on
s390 because its this_cpu_*() primitives disable preemption, and those
preempt_{dis,en}able() calls are themselves traced, so the tracing
code recurses back into itself.

On the one hand, per-cpu ops should use preempt_*able_notrace() and
raw_local_irq_*() to avoid that recursion; on the other hand, we can
trivially use raw_cpu_*() ops here instead.
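
For reference, a reduced sketch of the generic fallback flavour of the
two read primitives; this is illustration only -- the real macros in
include/linux/percpu-defs.h add type checking and per-size dispatch,
and architectures can override them entirely (e.g. x86 uses a single
%gs-relative load):

  /* Plain access: no preemption protection, nothing to trace. */
  #define raw_cpu_read(pcp)	(*raw_cpu_ptr(&(pcp)))

  /*
   * Preemption-safe access: without a single-instruction per-cpu
   * load, the task must be pinned to its CPU around the access.
   * Where the preempt_{dis,en}able() pair is traced (as on s390),
   * calling this from the tracing/lockdep path recurses.
   */
  #define this_cpu_read(pcp)					\
  ({								\
	typeof(pcp) ___v;					\
	preempt_disable();					\
	___v = raw_cpu_read(pcp);				\
	preempt_enable();					\
	___v;							\
  })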

Fixes: a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables")
Reported-by: Sven Schnelle <svens@linux.ibm.com>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200821085348.192346882@infradead.org
include/linux/irqflags.h
include/linux/lockdep.h
kernel/locking/lockdep.c

diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index bd5c557..d7e50a2 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -53,13 +53,13 @@ DECLARE_PER_CPU(int, hardirq_context);
   extern void trace_hardirqs_off_finish(void);
   extern void trace_hardirqs_on(void);
   extern void trace_hardirqs_off(void);
-# define lockdep_hardirq_context()     (this_cpu_read(hardirq_context))
+# define lockdep_hardirq_context()     (raw_cpu_read(hardirq_context))
 # define lockdep_softirq_context(p)    ((p)->softirq_context)
 # define lockdep_hardirqs_enabled()    (this_cpu_read(hardirqs_enabled))
 # define lockdep_softirqs_enabled(p)   ((p)->softirqs_enabled)
 # define lockdep_hardirq_enter()                       \
 do {                                                   \
-       if (this_cpu_inc_return(hardirq_context) == 1)  \
+       if (__this_cpu_inc_return(hardirq_context) == 1)\
                current->hardirq_threaded = 0;          \
 } while (0)
 # define lockdep_hardirq_threaded()            \
@@ -68,7 +68,7 @@ do {                                          \
 } while (0)
 # define lockdep_hardirq_exit()                        \
 do {                                           \
-       this_cpu_dec(hardirq_context);          \
+       __this_cpu_dec(hardirq_context);        \
 } while (0)
 # define lockdep_softirq_enter()               \
 do {                                           \
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 62a382d..6a584b3 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -535,19 +535,27 @@ do {                                                                      \
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
+/*
+ * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
+ * per-cpu variables. This is required because this_cpu_read() will potentially
+ * call into preempt/irq-disable and that obviously isn't right. This is also
+ * correct because when IRQs are enabled, it doesn't matter if we accidentally
+ * read the value from our previous CPU.
+ */
+
 #define lockdep_assert_irqs_enabled()                                  \
 do {                                                                   \
-       WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled));  \
+       WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled));   \
 } while (0)
 
 #define lockdep_assert_irqs_disabled()                                 \
 do {                                                                   \
-       WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled));   \
+       WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled));    \
 } while (0)
 
 #define lockdep_assert_in_irq()                                                \
 do {                                                                   \
-       WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context));   \
+       WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context));    \
 } while (0)
 
 #define lockdep_assert_preemption_enabled()                            \
@@ -555,7 +563,7 @@ do {                                                                        \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     debug_locks                        &&              \
                     (preempt_count() != 0              ||              \
-                     !this_cpu_read(hardirqs_enabled)));               \
+                     !raw_cpu_read(hardirqs_enabled)));                \
 } while (0)
 
 #define lockdep_assert_preemption_disabled()                           \
@@ -563,7 +571,7 @@ do {                                                                        \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     debug_locks                        &&              \
                     (preempt_count() == 0              &&              \
-                     this_cpu_read(hardirqs_enabled)));                \
+                     raw_cpu_read(hardirqs_enabled)));                 \
 } while (0)
 
 #else
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2fad21d..c872e95 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3756,7 +3756,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 
 skip_checks:
        /* we'll do an OFF -> ON transition: */
-       this_cpu_write(hardirqs_enabled, 1);
+       __this_cpu_write(hardirqs_enabled, 1);
        trace->hardirq_enable_ip = ip;
        trace->hardirq_enable_event = ++trace->irq_events;
        debug_atomic_inc(hardirqs_on_events);
@@ -3795,7 +3795,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
                /*
                 * We have done an ON -> OFF transition:
                 */
-               this_cpu_write(hardirqs_enabled, 0);
+               __this_cpu_write(hardirqs_enabled, 0);
                trace->hardirq_disable_ip = ip;
                trace->hardirq_disable_event = ++trace->irq_events;
                debug_atomic_inc(hardirqs_off_events);
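
Note that the patch picks between two unprotected flavours: raw_cpu_*()
for the assertions and context tests, which may run preemptible and can
tolerate reading a stale value from a previous CPU, and __this_cpu_*()
for the writes in lockdep_hardirqs_{on,off}() and
lockdep_hardirq_{enter,exit}(), which run with IRQs disabled and thus
cannot migrate. A reduced sketch of that difference (again after
include/linux/percpu-defs.h):

  /* No checks at all; the caller accepts a possibly racy read. */
  #define raw_cpu_read(pcp)	(*raw_cpu_ptr(&(pcp)))

  /*
   * Same plain access, but with a debug-only sanity check
   * (CONFIG_DEBUG_PREEMPT) that the caller really cannot migrate.
   */
  #define __this_cpu_read(pcp)					\
  ({								\
	__this_cpu_preempt_check("read");			\
	raw_cpu_read(pcp);					\
  })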