irq_work: Trace self-IPIs sent via arch_irq_work_raise()
author    Valentin Schneider <vschneid@redhat.com>
Tue, 7 Mar 2023 14:35:55 +0000 (14:35 +0000)
committer Peter Zijlstra <peterz@infradead.org>
Fri, 24 Mar 2023 10:01:27 +0000 (11:01 +0100)
IPIs sent to remote CPUs via irq_work_queue_on() are now covered by
trace_ipi_send_cpumask(); add another instance of the tracepoint to cover
self-IPIs sent via arch_irq_work_raise().
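
For anyone wanting to consume the new event directly, below is a minimal,
hypothetical probe-module sketch (not part of this patch). It assumes the
event's argument layout from include/trace/events/ipi.h (cpumask, callsite,
callback) and that the tracepoint symbol is reachable from module code
(e.g. an in-tree build); the probe name and message format are illustrative
only:

/*
 * Hypothetical example, not part of this patch: a probe module for
 * ipi_send_cpumask. Assumes the tracepoint is visible to module code.
 */
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/tracepoint.h>
#include <trace/events/ipi.h>

/* Tracepoint probes take the registration cookie first, then the event args. */
static void ipi_send_probe(void *data, const struct cpumask *cpumask,
			   unsigned long callsite, void *callback)
{
	pr_info("IPI to %*pbl from %pS, runs %ps\n",
		cpumask_pr_args(cpumask), (void *)callsite, callback);
}

static int __init ipi_send_probe_init(void)
{
	return register_trace_ipi_send_cpumask(ipi_send_probe, NULL);
}
module_init(ipi_send_probe_init);

static void __exit ipi_send_probe_exit(void)
{
	unregister_trace_ipi_send_cpumask(ipi_send_probe, NULL);
	/* Wait for in-flight probe calls before the module text goes away. */
	tracepoint_synchronize_unregister();
}
module_exit(ipi_send_probe_exit);

MODULE_LICENSE("GPL");

With such a module loaded, each self-IPI raised by irq_work would be logged
with its call site and irq_work callback, which is exactly the information
the tracepoint exposes.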

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Link: https://lore.kernel.org/r/20230307143558.294354-5-vschneid@redhat.com
kernel/irq_work.c

index 7afa40f..c33e88e 100644
@@ -22,6 +22,8 @@
 #include <asm/processor.h>
 #include <linux/kasan.h>
 
+#include <trace/events/ipi.h>
+
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
 static DEFINE_PER_CPU(struct task_struct *, irq_workd);
@@ -74,6 +76,16 @@ void __weak arch_irq_work_raise(void)
         */
 }
 
+static __always_inline void irq_work_raise(struct irq_work *work)
+{
+       if (trace_ipi_send_cpumask_enabled() && arch_irq_work_has_interrupt())
+               trace_ipi_send_cpumask(cpumask_of(smp_processor_id()),
+                                      _RET_IP_,
+                                      work->func);
+
+       arch_irq_work_raise();
+}
+
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {
@@ -99,7 +111,7 @@ static void __irq_work_queue_local(struct irq_work *work)
 
        /* If the work is "lazy", handle it from next tick if any */
        if (!lazy_work || tick_nohz_tick_stopped())
-               arch_irq_work_raise();
+               irq_work_raise(work);
 }
 
 /* Enqueue the irq work @work on the current CPU */
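
Note that irq_work_raise() guards the tracepoint twice:
trace_ipi_send_cpumask_enabled() is the static-branch test generated for
every tracepoint, so the disabled case costs a single patched jump, and
arch_irq_work_has_interrupt() keeps the event from firing on architectures
that only have the weak no-op arch_irq_work_raise(), where no self-IPI is
sent and the queued work is instead picked up from the next tick.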