/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *			PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *			Added CPU Hotplug handling for IPF.
 */
#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif
#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED	(0)
#define IRQ_USED	(1)
#define IRQ_RSVD	(2)
/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);
/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
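/*
 * Illustrative usage (not from the original source): consumers normally
 * read this table through the isa_irq_to_vector() wrapper, assumed here
 * to come from <asm/hw_irq.h>. For example, the legacy timer line, ISA
 * IRQ 0, resolves to vector 0x2f:
 *
 *	ia64_vector vec = isa_irq_to_vector(0);	 -> 0x2f
 */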
DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};
int check_irq_used(int irq)
{
	if (irq_status[irq] == IRQ_USED)
		return 1;

	return -1;
}
static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}
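/*
 * Illustrative sketch (not from the original source): binding, say,
 * irq 50 to vector 0x31 over a domain of cpus 2 and 3 leaves the three
 * tables above consistent with one another:
 *
 *	per_cpu(vector_irq, 2)[0x31] == 50	(likewise on cpu 3)
 *	irq_cfg[50].vector == 0x31, irq_cfg[50].domain == {2,3}
 *	vector_table[0x31] contains cpus 2 and 3
 */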
int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}
static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}
int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}
void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}
int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}
/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the in-use vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}
static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}
int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
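/*
 * Illustrative note (not from the original source): each online cpu in
 * the old domain receives IA64_IRQ_MOVE_VECTOR and runs
 * smp_irq_move_cleanup_interrupt() below, dropping its stale vector_irq
 * entry and decrementing move_cleanup_count until the old vector is
 * fully released.
 */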
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __get_cpu_var(vector_irq)[vector];
		if (irq < 0)
			continue;

		desc = irq_desc + irq;
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__get_cpu_var(vector_irq)[vector] = -1;
		cpu_clear(me, vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}
static struct irqaction irq_move_irqaction = {
	.handler =	smp_irq_move_cleanup_interrupt,
	.flags =	IRQF_DISABLED,
	.name =		"irq_move"
};
static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
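/*
 * Usage note (not from the original source): booting with "vector=percpu"
 * on the kernel command line selects per-cpu vector allocation domains,
 * giving each cpu its own vector space instead of one shared across all
 * cpus.
 */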
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif
void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}
/*
 * Dynamic irq allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		dynamic_irq_init(irq);
	return irq;
}
void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	clear_irq_vector(irq);
}
#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * That's where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (time_after(jiffies, last_time + 5 * HZ))
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */
	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
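	/*
	 * Illustrative note (not from the original source): the 256
	 * external vectors fall into 16 priority classes of 16 vectors
	 * each, and writing the in-service vector to cr.tpr masks
	 * everything at the same or lower priority, hence the nesting
	 * bound of 16.
	 */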
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}

	/*
	 * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a cpu is about to be
 * hot-removed.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call generic_handle_irq() just as it would have been
			 * called from a real interrupt handler, passing NULL
			 * for pt_regs. This could probably share code with
			 * ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif
#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
}

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	IRQF_DISABLED,
	.name =		"IPI"
};

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */
static struct irqaction resched_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"resched"
};

static struct irqaction tlb_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"tlb_flush"
};

#endif
void
ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	desc = irq_desc + irq;
	desc->status |= IRQ_PER_CPU;
	desc->chip = &irq_type_ia64_lsapic;
	if (action)
		setup_irq(irq, action);
}
void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
}

void __init
init_IRQ (void)
{
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
	if (vector_domain_type != VECTOR_DOMAIN_NONE)
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
#endif
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * cpu number is in 8bit ID and 8bit EID
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}
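/*
 * Illustrative note (not from the original source): the target cpu is
 * selected by address bits rather than data bits. Bits [19:4] of the
 * offset carry the 16-bit physical cpu id (8-bit ID plus 8-bit EID) and
 * bit 3 the redirect hint, while the written data encodes the delivery
 * mode in bits [10:8] and the vector in bits [7:0].
 */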