/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_CLAIMED;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

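/*
 * Note on the flag lifecycle (as implemented in irq_work_claim() above and
 * irq_work_run_list() below): a successful claim sets IRQ_WORK_CLAIMED, i.e.
 * PENDING | BUSY.  PENDING is cleared right before the callback runs, so the
 * work can be re-queued from within its own callback; BUSY is cleared once
 * the callback returns, unless the work was claimed again in the meantime.
 */
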
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

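/*
 * Usage sketch (illustrative only, not part of this file; my_irq_work_func
 * and my_work are hypothetical names):
 *
 *	static void my_irq_work_func(struct irq_work *work)
 *	{
 *		// Runs later from hardirq context, with interrupts disabled.
 *	}
 *	static DEFINE_IRQ_WORK(my_work, my_irq_work_func);
 *
 *	// From NMI or other restricted context:
 *	irq_work_queue(&my_work);
 */
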
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backend aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
			arch_send_call_function_single_ipi(cpu);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);

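/*
 * Usage sketch (illustrative only; my_work and target_cpu are hypothetical):
 *
 *	irq_work_queue_on(&my_work, target_cpu);
 *
 * If target_cpu is remote, the work is raised there via the call function
 * single IPI; otherwise the local queueing path above is used.
 */
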
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
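
/*
 * Usage sketch (illustrative only; obj is a hypothetical structure embedding
 * a struct irq_work): wait for a possibly running callback before freeing the
 * memory that contains the work item:
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 *
 * Must be called with interrupts enabled (see the lockdep assertion above),
 * since it busy-waits for the callback to finish.
 */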