1 #ifndef LINUX_HARDIRQ_H
2 #define LINUX_HARDIRQ_H
4 #include <linux/preempt_mask.h>
5 #include <linux/lockdep.h>
6 #include <linux/ftrace_irq.h>
7 #include <linux/vtime.h>
/*
 * synchronize_irq(): wait until any in-flight handler for @irq has
 * completed.  On SMP (or with GENERIC_HARDIRQS) this is a real
 * function; on UP it degenerates to a compiler barrier.
 * NOTE(review): the #else that should separate the extern declaration
 * from the UP stub is not visible in this excerpt (the listing appears
 * sampled) — confirm against the full header before relying on the
 * visible structure.
 */
10 #if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
11 extern void synchronize_irq(unsigned int irq);
13 # define synchronize_irq(irq) barrier()
/*
 * RCU NMI entry/exit hooks.  Under TINY_RCU these are empty inline
 * stubs (NMIs need no RCU bookkeeping there); otherwise they are real
 * functions implemented by the RCU core.
 * NOTE(review): the stub bodies and the intervening #else are not
 * visible in this excerpt — lines appear sampled from the full file;
 * confirm against the complete header.
 */
16 #if defined(CONFIG_TINY_RCU)
18 static inline void rcu_nmi_enter(void)
22 static inline void rcu_nmi_exit(void)
27 extern void rcu_nmi_enter(void);
28 extern void rcu_nmi_exit(void);
32 * It is safe to do non-atomic ops on ->hardirq_context,
33 * because NMI handlers may not preempt and the ops are
34 * always balanced, so the interrupted value of ->hardirq_context
35 * will always be restored.
/*
 * __irq_enter(): enter hardirq context without the NO_HZ/jiffies work
 * done by irq_enter().  Accounts irq entry time to the current task,
 * raises the preempt count by HARDIRQ_OFFSET, then notifies the
 * lockdep/irq tracer that we are now in hardirq context.
 * NOTE(review): the do { ... } while (0) wrapper of this multi-line
 * macro is not visible in this excerpt — confirm against the full
 * header.  No comments are inserted between the continuation lines, as
 * a non-continued line would terminate the macro.
 */
37 #define __irq_enter() \
39 account_irq_enter_time(current); \
40 add_preempt_count(HARDIRQ_OFFSET); \
41 trace_hardirq_enter(); \
45 * Enter irq context (on NO_HZ, update jiffies):
47 extern void irq_enter(void);
50 * Exit irq context without processing softirqs:
/*
 * __irq_exit(): leave hardirq context WITHOUT processing pending
 * softirqs.  Mirror image of __irq_enter(): tracer exit notification,
 * irq exit-time accounting, then drop HARDIRQ_OFFSET from the preempt
 * count.  Callers that want softirq processing use irq_exit() below.
 * NOTE(review): the do { ... } while (0) wrapper of this multi-line
 * macro is not visible in this excerpt — confirm against the full
 * header.
 */
52 #define __irq_exit() \
54 trace_hardirq_exit(); \
55 account_irq_exit_time(current); \
56 sub_preempt_count(HARDIRQ_OFFSET); \
60 * Exit irq context and process softirqs if needed:
62 extern void irq_exit(void);
69 add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
71 trace_hardirq_enter(); \
76 trace_hardirq_exit(); \
79 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
84 #endif /* LINUX_HARDIRQ_H */