/* Handle bad interrupts */
static struct irq_desc bad_irq_desc = {
.handle_irq = handle_bad_irq,
- .lock = SPIN_LOCK_UNLOCKED
+ .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
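For context on why the #error above is needed: with CONFIG_CPUMASK_OFFSTACK the affinity masks inside struct irq_desc become pointers that must be allocated at run time, which a statically initialised descriptor like bad_irq_desc never does. Roughly, paraphrasing <linux/cpumask.h> (see the header for the exact definitions):

#ifdef CONFIG_CPUMASK_OFFSTACK
/* mask lives off-stack and must be set up with alloc_cpumask_var() */
typedef struct cpumask *cpumask_var_t;
#else
/* mask storage is embedded directly in the enclosing object */
typedef struct cpumask cpumask_var_t[1];
#endif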
/*
 * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
 * come via this function. Instead, they should provide their
 * own 'handler'
 */

/*
 * irq_count is used to check if a CPU is already on an interrupt stack
 * or not. While this is essentially redundant with preempt_count it is
 * a little cheaper to use a separate counter in the PDA (short of
 * moving irq_enter into assembly, which would be too much work)
 */
-1: incl %gs:pda_irqcount
+1: incl PER_CPU_VAR(irq_count)
jne 2f
popq_cfi %rax /* move return address... */
- mov %gs:pda_irqstackptr,%rsp
+ mov PER_CPU_VAR(irq_stack_ptr),%rsp
EMPTY_FRAME 0
+ pushq_cfi %rbp /* backlink for unwinder */
pushq_cfi %rax /* ... to the new stack */
/*
 * We entered an interrupt context - irqs are off:
 */
/* IPI vectors for APIC spurious and error interrupts */
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
-#endif
+# ifdef CONFIG_PERF_COUNTERS
+ alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
+# endif
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
+# ifdef CONFIG_X86_MCE_P4THERMAL
/* thermal monitor LVT interrupt */
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+# endif
#endif
+}
+
+/* Overridden in paravirt.c */
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
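The weak alias above is plain GCC machinery: init_IRQ() resolves to native_init_IRQ() unless a stronger definition (here, the paravirt one) is linked in. A minimal, self-contained illustration of the same pattern (hypothetical names, userspace C):

#include <stdio.h>

void native_greet(void)
{
	puts("native");
}

/* weak alias: callers of greet() get native_greet() by default */
void greet(void) __attribute__((weak, alias("native_greet")));

int main(void)
{
	greet();	/* prints "native" unless a strong greet() overrides it */
	return 0;
}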
+void __init native_init_IRQ(void)
+{
+ int i;
+
+ /* all the set up before the call gates are initialised */
+ pre_intr_init_hook();
+
+ apic_intr_init();
+
+ /*
+ * Cover the whole vector space, no vector can escape
+ * us. (some of these will be overridden and become
+ * 'special' SMP interrupts)
+ */
+ for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
+ int vector = FIRST_EXTERNAL_VECTOR + i;
+ /* SYSCALL_VECTOR was reserved in trap_init. */
+ if (!test_bit(vector, used_vectors))
+ set_intr_gate(vector, interrupt[i]);
+ }
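For reference, the used_vectors bitmap consulted by the loop above is filled in earlier; a sketch of that reservation, paraphrasing what trap_init() does in this era (the helper name below is hypothetical):

DECLARE_BITMAP(used_vectors, NR_VECTORS);

static void __init reserve_builtin_vectors(void)	/* hypothetical helper */
{
	int i;

	/* CPU exception vectors below FIRST_EXTERNAL_VECTOR are never IRQs */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);
#ifdef CONFIG_X86_32
	set_bit(SYSCALL_VECTOR, used_vectors);	/* int 0x80 system call */
#endif
}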
- if (!acpi_ioapic)
- setup_irq(2, &irq2);
-
/* setup after call gates are initialised (usually add in
* the architecture specific gates)
*/
static inline void setup_node_to_cpumask_map(void) { }
#endif
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
+#ifdef CONFIG_X86_64
+
+/* correctly size the local cpu masks */
- static void setup_cpu_local_masks(void)
++static void __init setup_cpu_local_masks(void)
+{
+ alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+ alloc_bootmem_cpumask_var(&cpu_callin_mask);
+ alloc_bootmem_cpumask_var(&cpu_callout_mask);
+ alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
+#else /* CONFIG_X86_32 */
+
+static inline void setup_cpu_local_masks(void)
+{
+}
+
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas. These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
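A sketch of the copy that comment describes, paraphrasing the x86 per-cpu setup code of this era (the function name below is hypothetical; per_cpu(), early_per_cpu_map() and early_per_cpu_ptr() are the real accessors):

static void __init copy_early_percpu_maps(void)	/* hypothetical name */
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* move the boot-time static entry into this CPU's area */
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
	}

	/* the static arrays are now expendable */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
}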
static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
init_gdt(smp_processor_id());
+ per_cpu(this_cpu_off, smp_processor_id()) = __per_cpu_offset[smp_processor_id()];
switch_to_new_gdt();
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), cpu_callout_map);
- cpu_set(smp_processor_id(), cpu_possible_map);
- cpu_set(smp_processor_id(), cpu_present_map);
+ cpu_online_map = cpumask_of_cpu(smp_processor_id());
+ cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+ cpu_callin_map = CPU_MASK_NONE;
+ cpu_present_map = cpumask_of_cpu(smp_processor_id());
}
static int __cpuinit voyager_cpu_up(unsigned int cpu)
void __init smp_setup_processor_id(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
- x86_write_percpu(cpu_number, hard_smp_processor_id());
+ percpu_write(cpu_number, hard_smp_processor_id());
}
- static void voyager_send_call_func(cpumask_t callmask)
+ static void voyager_send_call_func(const struct cpumask *callmask)
{
- __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+ __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
send_CPI(mask, VIC_CALL_FUNCTION_CPI);
}
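The open-coded bit fiddling above reads as: take the low 32 bits of the cpumask (Voyager's VIC addresses at most 32 CPUs) and clear the sender's own bit. An equivalent formulation using the cpumask accessors, shown only as a sketch:

static void voyager_send_call_func(const struct cpumask *callmask)
{
	/* low word of the mask, minus ourselves; the VIC handles <= 32 CPUs */
	__u32 mask = cpumask_bits(callmask)[0] & ~(1U << smp_processor_id());

	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
}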
/* get the address */
address = read_cr2();
- if (unlikely(notify_page_fault(regs)))
- return;
- si_code = SEGV_MAPERR;
-
if (unlikely(kmmio_fault(regs, address)))
return;
if (vmalloc_fault(address) >= 0)
	return;
/* Can handle a stale RO->RW TLB */
- if (spurious_fault(address, error_code))
+ if (spurious_fault(error_code, address))
return;
+ /* kprobes don't want to hook the spurious faults. */
+ if (notify_page_fault(regs))
+ return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
* fault we could otherwise deadlock.
*/
- goto bad_area_nosemaphore;
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
}
- /* kprobes don't want to hook the spurious faults. */
- if (notify_page_fault(regs))
++ if (unlikely(notify_page_fault(regs)))
+ return;
-
/*
* It's safe to allow irq's after cr2 has been saved and the
* vmalloc fault has been handled.
*/
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
++ u64 perf_flags;
++
/* Don't trace irqs off for idle */
stop_critical_timings();
++ perf_flags = hw_perf_save_disable();
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cx);
/* Dummy wait op - must do something useless after P_LVL2 read
   because chipsets cannot guarantee that STPCLK# signal
   gets asserted in time to freeze execution properly. */
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
++ hw_perf_restore(perf_flags);
start_critical_timings();
}
int legacy_count;
int i;
+ init_irq_default_affinity();
+
+ /* initialize nr_irqs based on nr_cpu_ids */
+ arch_probe_nr_irqs();
+ printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
desc = irq_desc_legacy;
legacy_count = ARRAY_SIZE(irq_desc_legacy);
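init_irq_default_affinity() is needed because irq_default_affinity is likewise converted to a cpumask_var_t and so may require run-time allocation. A sketch of what such an initializer does under the off-stack configuration (details may differ from kernel/irq/handle.c):

#if defined(CONFIG_SMP) && defined(CONFIG_CPUMASK_OFFSTACK)
cpumask_var_t irq_default_affinity;

static void __init init_irq_default_affinity(void)
{
	/* allocate the off-stack mask early and default to "all CPUs" */
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif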
int count;
int i;
+ init_irq_default_affinity();
+
+ printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
desc = irq_desc;
count = ARRAY_SIZE(irq_desc);
desc = old_desc;
goto out_unlock;
}
- init_copy_one_irq_desc(irq, old_desc, desc, cpu);
irq_desc_ptrs[irq] = desc;
+ spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
free_one_irq_desc(old_desc, desc);