#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <trace/events/irq.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
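
/*
 * Example (illustrative, not part of this header): a driver sharing a
 * level-triggered line would combine trigger and sharing flags when
 * requesting the interrupt; "my_isr" and "my_dev" are hypothetical.
 *
 *	ret = request_irq(irq, my_isr, IRQF_SHARED | IRQF_TRIGGER_LOW,
 *			  "my_device", my_dev);
 *	if (ret)
 *		return ret;
 */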

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	void *dev_id;
	struct irqaction *next;
	int irq;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
	unsigned long thread_mask;
	const char *name;
	struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);
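
/*
 * Example (illustrative sketch): the usual split for
 * request_threaded_irq(). The hardirq half only quiesces the device,
 * the thread half does the heavy lifting in sleepable context; all
 * "my_*" names below are hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;	// not ours (shared line)
 *		my_dev_mask_irq(dev);		// quiesce the device
 *		return IRQ_WAKE_THREAD;		// run my_handle_work()
 *	}
 *
 *	static irqreturn_t my_handle_work(int irq, void *dev_id)
 *	{
 *		// may sleep here; unmask the device when done
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, my_quick_check, my_handle_work,
 *				   IRQF_ONESHOT, "my_device", dev);
 */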

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
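
/*
 * Example (illustrative): with the device-managed variant the IRQ is
 * released automatically when "dev" is unbound, so no free_irq() is
 * needed in the error or remove paths; names are hypothetical.
 *
 *	ret = devm_request_irq(&pdev->dev, irq, my_isr, 0,
 *			       dev_name(&pdev->dev), my_priv);
 *	if (ret)
 *		return ret;	// nothing to unwind
 */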

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
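
/*
 * Example (illustrative): a multiqueue driver may hint the preferred
 * CPU for each queue's vector so userspace irqbalance can honour it;
 * the per-queue fields here are hypothetical.
 *
 *	cpumask_set_cpu(q->cpu, &q->affinity_mask);
 *	irq_set_affinity_hint(q->irq, &q->affinity_mask);
 *	...
 *	irq_set_affinity_hint(q->irq, NULL);	// clear hint on teardown
 */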

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

static inline void irq_run_affinity_notifiers(void)
{
	flush_scheduled_work();
}
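
/*
 * Example (illustrative sketch): registering for affinity changes;
 * both callbacks run in process context and all "my_*" names are
 * hypothetical.
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		// re-target per-CPU resources to the new mask
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *n =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		// safe to free the enclosing structure from here on
 *	}
 *
 *	my_ctx.notify = my_notify;
 *	my_ctx.release = my_release;
 *	irq_set_affinity_notifier(irq, &my_ctx);
 */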

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know
 * that a particular irq context is disabled and is the
 * only irq-context user of a lock, so that it's safe to
 * take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
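
/*
 * Example (illustrative, under the assumption stated above): a lock
 * whose only irq-context user is dev->irq can be taken without
 * disabling hardirqs once that one line is masked; "my_lock" is
 * hypothetical.
 *
 *	disable_irq_nosync_lockdep(dev->irq);
 *	spin_lock(&my_lock);	// safe: the only irq user is masked
 *	...
 *	spin_unlock(&my_lock);
 *	enable_irq_lockdep(dev->irq);
 */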

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
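
/*
 * Example (illustrative): a driver whose interrupt should wake the
 * system arms it on suspend and disarms it on resume, typically
 * gated on device_may_wakeup(); "my_priv" is hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(my_priv->irq);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(my_priv->irq);
 *		return 0;
 *	}
 */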

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures that want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
#endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}

#endif /* CONFIG_GENERIC_HARDIRQS */

#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs, if you need not _really_ high
   frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);

static inline void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
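
/*
 * Example (illustrative only; new softirq slots are discouraged, see
 * the note above): a core-kernel user wires an action to its slot at
 * init time and raises it later. "MY_SOFTIRQ" stands in for one of
 * the fixed enum entries above.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// softirq context: no sleeping allowed here
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// during init
 *	...
 *	raise_softirq(MY_SOFTIRQ);	// mark it pending on this cpu
 */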

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   Main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some intertask synchronization,
     it does so with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
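
/*
 * Example (illustrative): declaring and scheduling a tasklet; the
 * handler later runs in softirq context and must not sleep. The
 * "my_*" names are hypothetical.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// bottom-half work, atomic context
 *	}
 *
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn,
 *			       (unsigned long)&my_dev_instance);
 *
 *	// typically from the hardirq handler:
 *	tasklet_schedule(&my_tasklet);
 */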

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
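
/*
 * Example (illustrative): dynamically initialised tasklets must be
 * killed before their data disappears, typically on teardown; the
 * names are hypothetical.
 *
 *	tasklet_init(&dev->rx_tasklet, my_rx_fn, (unsigned long)dev);
 *	...
 *	tasklet_kill(&dev->rx_tasklet);	// wait until it is inactive
 *	kfree(dev);
 */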

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
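
/*
 * Example (illustrative sketch): the hrtimer fires in hard interrupt
 * context and the tasklet_hrtimer machinery defers "function" to
 * tasklet (softirq) context; "my_timer_fn" and the 10ms delay are
 * hypothetical.
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		// softirq context: do the deferred work here
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&dev->ttimer, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&dev->ttimer,
 *			      ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */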

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. enable interrupts (local_irq_enable());
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
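
/*
 * Example (illustrative): the probing sequence from the comment above
 * in code form; the my_dev_* steps are hypothetical device helpers.
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	my_dev_mask_irq(dev);		// step 1
 *	mask = probe_irq_on();		// step 3: take over idle IRQs
 *	my_dev_trigger_irq(dev);	// step 4
 *	msleep(10);			// step 5
 *	irq = probe_irq_off(mask);	// step 6
 *	my_dev_ack_irq(dev);		// step 7
 *	if (irq <= 0)
 *		return -ENODEV;		// none (0) or multiple (<0) fired
 */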

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif