#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt_mask.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>
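
/*
 * synchronize_irq() waits for all in-flight handlers of the given
 * interrupt line to complete before returning.
 */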
extern void synchronize_irq(unsigned int irq);

#if defined(CONFIG_TINY_RCU)

/* Tiny RCU does no NMI tracking, so the hooks are empty stubs. */
static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
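
/*
 * nmi_enter()/nmi_exit() below use these hooks to tell RCU that the
 * CPU is inside an NMI, so RCU read-side critical sections remain
 * safe in NMI handlers.
 */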

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
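
/*
 * Illustrative pairing (a sketch, not kernel code): once HARDIRQ_OFFSET
 * is in the preempt count, in_irq() and in_interrupt() report true:
 *
 *	__irq_enter();
 *	BUG_ON(!in_irq());	// now in hardirq context
 *	...
 *	__irq_exit();
 */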

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
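
/*
 * Note that __irq_exit() undoes __irq_enter() step for step in reverse
 * order, keeping tracing, time accounting and the preempt count balanced.
 */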

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)
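
/*
 * Sketch of the intended pairing in an arch NMI entry point
 * (handle_arch_nmi() is hypothetical, for illustration only):
 *
 *	void handle_arch_nmi(struct pt_regs *regs)
 *	{
 *		nmi_enter();
 *		// ... handle the NMI; in_nmi() is true here ...
 *		nmi_exit();
 *	}
 */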

#endif /* LINUX_HARDIRQ_H */