/*
 * linux/arch/sh/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

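/* Bumped on stray vector traps; reported as "ERR" in /proc/interrupts. */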
atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves, it doesn't deserve
 * a generic callback i think.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");

	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

	return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

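/*
 * Run one IRQ handler, switching to the per-CPU hardirq stack unless
 * we are already executing on it.
 */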
static inline void handle_one_irq(unsigned int irq)
{
	union irq_ctx *curctx, *irqctx;

	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (curctx != irqctx) {
		u32 *isp;

		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

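		/*
		 * Call generic_handle_irq(irq) with r15 pointed at the top
		 * of the hardirq stack; r8 preserves the interrupted stack
		 * pointer across the call.
		 */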
		__asm__ __volatile__ (
			"mov	%0, r4		\n"
			"mov	r15, r8		\n"
			"jsr	@%1		\n"
			/* switch to the irq stack */
			" mov	%2, r15		\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15		\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
		generic_handle_irq(irq);
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

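/*
 * Clearing hardirq_ctx[] is sufficient: irq_ctx_init() keys off it, so
 * a CPU that comes back online gets both stacks reinitialised.
 */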
void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

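/*
 * Run pending softirqs on the dedicated per-CPU softirq stack rather
 * than on the current task's kernel stack.
 */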
void do_softirq_own_stack(void)
{
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	curctx = current_thread_info();
	irqctx = softirq_ctx[smp_processor_id()];
	irqctx->tinfo.task = curctx->task;
	irqctx->tinfo.previous_sp = current_stack_pointer;

	/* build the stack frame on the softirq stack */
	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

	__asm__ __volatile__ (
		"mov	r15, r9		\n"
		"jsr	@%0		\n"
		/* switch to the softirq stack */
		" mov	%1, r15		\n"
		/* restore the thread stack */
		"mov	r9, r15		\n"
		: /* no outputs */
		: "r" (__do_softirq), "r" (isp)
		: "memory", "r0", "r1", "r2", "r3", "r4",
		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
	);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

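/*
 * Entry point for external interrupts: translate and demux the hardware
 * vector, then handle the resulting IRQ on this CPU.
 */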
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	irq = irq_demux(irq_lookup(irq));

	if (irq != NO_IRQ_IGNORE) {
		handle_one_irq(irq);
		irq_finish(irq);
	}

	irq_exit();

	set_irq_regs(old_regs);

	return IRQ_HANDLED;
}

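/*
 * Arch-level IRQ bring-up: platform and machine-vector specific setup,
 * then the boot CPU's IRQ stacks.
 */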
void __init init_IRQ(void)
{
	plat_irq_setup();

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	intc_finalize();

	irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
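/*
 * Retarget one IRQ at the given CPU, under the descriptor lock.
 */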
static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
	       irq, data->node, cpu);

	raw_spin_lock_irq(&desc->lock);
	chip->irq_set_affinity(data, cpumask_of(cpu), false);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int irq, cpu = smp_processor_id();

	for_each_active_irq(irq) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (data->node == cpu) {
			unsigned int newcpu = cpumask_any_and(data->affinity,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
						    irq, cpu);

				cpumask_setall(data->affinity);
				newcpu = cpumask_any_and(data->affinity,
							 cpu_online_mask);
			}

			route_irq(data, irq, newcpu);
		}
	}
}
#endif