lockdep: Annotate irq_work
author	Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Sat, 21 Mar 2020 11:26:03 +0000 (12:26 +0100)
committer	Peter Zijlstra <peterz@infradead.org>
Sat, 21 Mar 2020 15:00:24 +0000 (16:00 +0100)
Mark irq_work items that must be invoked in hardirq context, even on
PREEMPT_RT, with IRQ_WORK_HARD_IRQ. irq_work items without this flag
will be invoked in softirq context on PREEMPT_RT.
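
As a minimal usage sketch (handler and work-item names are made up for
illustration; the flag handling follows the hunks below), an item that
has to run in hard interrupt context even on PREEMPT_RT can be set up
either statically or at init time:

  #include <linux/irq_work.h>

  /* Hypothetical handler; runs in hard IRQ context, so it must only
   * take raw_spinlock_t, never spinlock_t.
   */
  static void my_hard_handler(struct irq_work *work)
  {
          /* ... */
  }

  /* Static initialization, mirroring the tick-sched.c hunk below: */
  static struct irq_work my_hard_work = {
          .func  = my_hard_handler,
          .flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
  };

  /* Or set up and queue dynamically, mirroring the rcu/tree.c hunk below: */
  static struct irq_work other_hard_work;

  static void queue_other_hard_work(void)
  {
          init_irq_work(&other_hard_work, my_hard_handler);
          atomic_set(&other_hard_work.flags, IRQ_WORK_HARD_IRQ);
          irq_work_queue(&other_hard_work);
  }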

Set ->irq_config to 1 for the IRQ_WORK items which are invoked in softirq
context so lockdep knows that these can safely acquire a spinlock_t.
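
For an item without the flag, the effect can be sketched roughly as
follows (handler and lock names are illustrative only): on PREEMPT_RT
the callback runs in softirq context, and because ->irq_config is set
to 1 around the invocation, lockdep treats it as a context in which
acquiring a spinlock_t (a sleeping lock on RT) is legitimate:

  #include <linux/irq_work.h>
  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(my_lock);      /* spinlock_t: sleeping lock on PREEMPT_RT */

  /* Hypothetical callback of an irq_work item that does not set
   * IRQ_WORK_HARD_IRQ: on PREEMPT_RT it is invoked in softirq context,
   * so taking a spinlock_t here is safe.
   */
  static void my_soft_handler(struct irq_work *work)
  {
          spin_lock(&my_lock);
          /* ... */
          spin_unlock(&my_lock);
  }

  static struct irq_work my_soft_work = {
          .func = my_soft_handler,
  };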

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200321113242.643576700@linutronix.de
include/linux/irq_work.h
include/linux/irqflags.h
kernel/irq_work.c
kernel/rcu/tree.c
kernel/time/tick-sched.c

diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 02da997..3b752e8 100644
@@ -18,6 +18,8 @@
 
 /* Doesn't want IPI, wait for tick: */
 #define IRQ_WORK_LAZY          BIT(2)
+/* Run hard IRQ context, even on RT */
+#define IRQ_WORK_HARD_IRQ      BIT(3)
 
 #define IRQ_WORK_CLAIMED       (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
 
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 9c17f9c..f23f540 100644
@@ -69,6 +69,17 @@ do {                                         \
                        current->irq_config = 0;        \
          } while (0)
 
+# define lockdep_irq_work_enter(__work)                                        \
+         do {                                                          \
+                 if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+                       current->irq_config = 1;                        \
+         } while (0)
+# define lockdep_irq_work_exit(__work)                                 \
+         do {                                                          \
+                 if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+                       current->irq_config = 0;                        \
+         } while (0)
+
 #else
 # define trace_hardirqs_on()           do { } while (0)
 # define trace_hardirqs_off()          do { } while (0)
@@ -83,6 +94,8 @@ do {                                          \
 # define lockdep_softirq_exit()                do { } while (0)
 # define lockdep_hrtimer_enter(__hrtimer)              do { } while (0)
 # define lockdep_hrtimer_exit(__hrtimer)               do { } while (0)
+# define lockdep_irq_work_enter(__work)                do { } while (0)
+# define lockdep_irq_work_exit(__work)         do { } while (0)
 #endif
 
 #if defined(CONFIG_IRQSOFF_TRACER) || \
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 828cc30..48b5d1b 100644
@@ -153,7 +153,9 @@ static void irq_work_run_list(struct llist_head *list)
                 */
                flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
 
+               lockdep_irq_work_enter(work);
                work->func(work);
+               lockdep_irq_work_exit(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d91c915..5066d1d 100644
@@ -1113,6 +1113,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
                    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
                    (rnp->ffmask & rdp->grpmask)) {
                        init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
+                       atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
                        rdp->rcu_iw_pending = true;
                        rdp->rcu_iw_gp_seq = rnp->gp_seq;
                        irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 4be756b..3e2dc9b 100644
@@ -245,6 +245,7 @@ static void nohz_full_kick_func(struct irq_work *work)
 
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
        .func = nohz_full_kick_func,
+       .flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
 };
 
 /*