Merge branch 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 9375dce..664bcb7 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -63,10 +63,10 @@ static int show_other_interrupts(struct seq_file *p, int prec)
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_printf(p, "  Spurious interrupts\n");
-       seq_printf(p, "%*s: ", prec, "CNT");
+       seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
-       seq_printf(p, "  Performance counter interrupts\n");
+       seq_printf(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "PND");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
@@ -92,17 +92,17 @@ static int show_other_interrupts(struct seq_file *p, int prec)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_printf(p, "  TLB shootdowns\n");
 #endif
-#ifdef CONFIG_X86_MCE
+#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_printf(p, "  Thermal event interrupts\n");
-# ifdef CONFIG_X86_MCE_THRESHOLD
+#endif
+#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_printf(p, "  Threshold APIC interrupts\n");
-# endif
 #endif
 #ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
@@ -194,11 +194,11 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
        sum += irq_stats(cpu)->irq_call_count;
        sum += irq_stats(cpu)->irq_tlb_count;
 #endif
-#ifdef CONFIG_X86_MCE
+#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
-# ifdef CONFIG_X86_MCE_THRESHOLD
+#endif
+#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
-# endif
 #endif
 #ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
@@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
                                __func__, smp_processor_id(), vector, irq);
        }
 
-       run_local_timers();
        irq_exit();
 
        set_irq_regs(old_regs);
@@ -269,10 +268,99 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();
 
-       run_local_timers();
        irq_exit();
 
        set_irq_regs(old_regs);
 }
 
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* A CPU has been removed from cpu_online_mask.  Reset irq affinities. */
+void fixup_irqs(void)
+{
+       unsigned int irq, vector;
+       static int warned;
+       struct irq_desc *desc;
+
+       for_each_irq_desc(irq, desc) {
+               int break_affinity = 0;
+               int set_affinity = 1;
+               const struct cpumask *affinity;
+
+               if (!desc)
+                       continue;
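+               /* irq 2 is the cascade to the slave 8259A; never move it */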
+               if (irq == 2)
+                       continue;
+
+               /* interrupts are disabled at this point */
+               spin_lock(&desc->lock);
+
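+               /*
+                * Nothing to fix up if the irq has no action installed or
+                * is already allowed to run on every online CPU.
+                */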
+               affinity = desc->affinity;
+               if (!irq_has_action(irq) ||
+                   cpumask_equal(affinity, cpu_online_mask)) {
+                       spin_unlock(&desc->lock);
+                       continue;
+               }
+
+               /*
+                * Complete any pending irq move.  This CPU is going down,
+                * and in the non-interrupt-remapping case we cannot wait
+                * for this interrupt to arrive here before completing the
+                * move.
+                */
+               irq_force_complete_move(irq);
+
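+               /*
+                * If no online CPU is left in the affinity mask, fall back
+                * to all CPUs and remember that we broke the requested
+                * affinity.
+                */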
+               if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+                       break_affinity = 1;
+                       affinity = cpu_all_mask;
+               }
+
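+               /*
+                * Mask the irq around the affinity change, unless it can be
+                * migrated from process context (IRQ_MOVE_PCNTXT).
+                */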
+               if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
+                       desc->chip->mask(irq);
+
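+               /* only warn once about chips lacking a set_affinity method */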
+               if (desc->chip->set_affinity)
+                       desc->chip->set_affinity(irq, affinity);
+               else if (!(warned++))
+                       set_affinity = 0;
+
+               if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
+                       desc->chip->unmask(irq);
+
+               spin_unlock(&desc->lock);
+
+               if (break_affinity && set_affinity)
+                       printk("Broke affinity for irq %i\n", irq);
+               else if (!set_affinity)
+                       printk("Cannot set affinity for irq %i\n", irq);
+       }
+
+       /*
+        * We could drop the mdelay() and simply retrigger, on their new
+        * CPU targets, all the irqs previously handled by this CPU.  That
+        * works, but it generates spurious-interrupt messages (harmless,
+        * but noisy).
+        *
+        * So for now, keep the mdelay(1), check the IRR, and retrigger
+        * only those interrupts that are actually pending, since this CPU
+        * is already offline...
+        */
+       mdelay(1);
+
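+       /*
+        * Scan this CPU's vectors and retrigger, on their new targets,
+        * any interrupts that are still pending in the local APIC IRR.
+        */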
+       for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+               unsigned int irr;
+
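+               /* vector_irq is -1 for vectors with no irq assigned */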
+               if (__get_cpu_var(vector_irq)[vector] < 0)
+                       continue;
+
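+               /* IRR: 256 bits, as eight 32-bit APIC registers 0x10 apart */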
+               irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+               if (irr & (1 << (vector % 32))) {
+                       irq = __get_cpu_var(vector_irq)[vector];
+
+                       desc = irq_to_desc(irq);
+                       spin_lock(&desc->lock);
+                       if (desc->chip->retrigger)
+                               desc->chip->retrigger(irq);
+                       spin_unlock(&desc->lock);
+               }
+       }
+}
+#endif