struct IO_APIC_route_entry *entry,
unsigned int destination, int vector,
struct io_apic_irq_attr *attr);
+extern int intr_set_affinity(struct irq_data *data,
+ const struct cpumask *mask,
+ bool force);
#else /* CONFIG_IRQ_REMAP */
{
return -ENODEV;
}
+static inline int intr_set_affinity(struct irq_data *data,
+ const struct cpumask *mask,
+ bool force)
+{
+ return 0;
+}
#endif /* CONFIG_IRQ_REMAP */
#endif /* __X86_INTR_REMAPPING_H */
return ret;
}
-#ifdef CONFIG_IRQ_REMAP
-
-/*
- * Migrate the IO-APIC irq in the presence of intr-remapping.
- *
- * For both level and edge triggered, irq migration is a simple atomic
- * update(of vector and cpu destination) of IRTE and flush the hardware cache.
- *
- * For level triggered, we eliminate the io-apic RTE modification (with the
- * updated vector information), by using a virtual vector (io-apic pin number).
- * Real vector that is used for interrupting cpu will be coming from
- * the interrupt-remapping table entry.
- *
- * As the migration is a simple atomic update of IRTE, the same mechanism
- * is used to migrate MSI irq's in the presence of interrupt-remapping.
- */
-static int
-ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
-{
- struct irq_cfg *cfg = data->chip_data;
- unsigned int dest, irq = data->irq;
- struct irte irte;
-
- if (!cpumask_intersects(mask, cpu_online_mask))
- return -EINVAL;
-
- if (get_irte(irq, &irte))
- return -EBUSY;
-
- if (assign_irq_vector(irq, cfg, mask))
- return -EBUSY;
-
- dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
-
- irte.vector = cfg->vector;
- irte.dest_id = IRTE_DEST(dest);
-
- /*
- * Atomically updates the IRTE with the new destination, vector
- * and flushes the interrupt entry cache.
- */
- modify_irte(irq, &irte);
-
- /*
- * After this point, all the interrupts will start arriving
- * at the new destination. So, time to cleanup the previous
- * vector allocation.
- */
- if (cfg->move_in_progress)
- send_cleanup_vector(cfg);
-
- cpumask_copy(data->affinity, mask);
- return 0;
-}
-
-#else
-static inline int
-ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
-{
- return 0;
-}
-#endif
-
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
unsigned vector, me;
chip->irq_eoi = ir_ack_apic_level;
#ifdef CONFIG_SMP
- chip->irq_set_affinity = ir_ioapic_set_affinity;
+ chip->irq_set_affinity = intr_set_affinity;
#endif
}
#endif /* CONFIG_IRQ_REMAP */
mask = apic->target_cpus();
if (intr_remapping_enabled)
- ir_ioapic_set_affinity(idata, mask, false);
+ intr_set_affinity(idata, mask, false);
else
ioapic_set_affinity(idata, mask, false);
}
return 0;
}
+/*
+ * Migrate the IO-APIC irq in the presence of intr-remapping.
+ *
+ * For both level and edge triggered, irq migration is a simple atomic
+ * update (of vector and cpu destination) of the IRTE and a flush of the
+ * hardware cache.
+ *
+ * For level triggered, we eliminate the io-apic RTE modification (with the
+ * updated vector information) by using a virtual vector (io-apic pin number).
+ * The real vector that is used for interrupting the cpu will come from
+ * the interrupt-remapping table entry.
+ *
+ * As the migration is a simple atomic update of the IRTE, the same mechanism
+ * is used to migrate MSI irqs in the presence of interrupt-remapping.
+ */
+static int
+intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_cfg *cfg = data->chip_data;
+ unsigned int dest, irq = data->irq;
+ struct irte irte;
+
+ if (!cpumask_intersects(mask, cpu_online_mask))
+ return -EINVAL;
+
+ if (get_irte(irq, &irte))
+ return -EBUSY;
+
+ if (assign_irq_vector(irq, cfg, mask))
+ return -EBUSY;
+
+ dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
+
+ irte.vector = cfg->vector;
+ irte.dest_id = IRTE_DEST(dest);
+
+ /*
+ * Atomically update the IRTE with the new destination and vector,
+ * then flush the interrupt entry cache.
+ */
+ modify_irte(irq, &irte);
+
+ /*
+ * After this point, all interrupts will start arriving
+ * at the new destination, so it is time to clean up the previous
+ * vector allocation.
+ */
+ if (cfg->move_in_progress)
+ send_cleanup_vector(cfg);
+
+ cpumask_copy(data->affinity, mask);
+ return 0;
+}
struct irq_remap_ops intel_irq_remap_ops = {
.supported = intel_intr_remapping_supported,
.hardware_reenable = reenable_intr_remapping,
.enable_faulting = enable_drhd_fault_handling,
.setup_ioapic_entry = intel_setup_ioapic_entry,
+ .set_affinity = intel_ioapic_set_affinity,
};
return remap_ops->setup_ioapic_entry(irq, entry, destination,
vector, attr);
}
+
+int intr_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ if (!remap_ops || !remap_ops->set_affinity)
+ return 0;
+
+ return remap_ops->set_affinity(data, mask, force);
+}
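
The intr_set_affinity() wrapper above is a thin dispatch through the global remap_ops table: it reports success when no backend has registered a set_affinity handler, so callers need no CONFIG_IRQ_REMAP checks of their own. The following stand-alone sketch of that dispatch pattern uses hypothetical demo_* names, not anything from this patch:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical ops table mirroring struct irq_remap_ops. */
    struct demo_remap_ops {
            int (*set_affinity)(int irq, unsigned int cpu);
    };

    static int demo_backend_set_affinity(int irq, unsigned int cpu)
    {
            printf("remapping irq %d to cpu %u\n", irq, cpu);
            return 0;
    }

    static const struct demo_remap_ops demo_backend_ops = {
            .set_affinity = demo_backend_set_affinity,
    };

    /* NULL until a backend registers itself, like remap_ops in the patch. */
    static const struct demo_remap_ops *demo_ops;

    /* Generic entry point: succeed quietly when no handler is available. */
    static int demo_set_affinity(int irq, unsigned int cpu)
    {
            if (!demo_ops || !demo_ops->set_affinity)
                    return 0;
            return demo_ops->set_affinity(irq, cpu);
    }

    int main(void)
    {
            demo_set_affinity(10, 2);       /* no backend yet: no-op, returns 0 */
            demo_ops = &demo_backend_ops;
            demo_set_affinity(10, 2);       /* dispatches to the registered handler */
            return 0;
    }

The return-0 fallback mirrors the inline stub added to intr_remapping.h, so the behaviour with and without interrupt remapping stays consistent.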
struct IO_APIC_route_entry;
struct io_apic_irq_attr;
+struct irq_data;
+struct cpumask;
extern int disable_intremap;
extern int disable_sourceid_checking;
int (*setup_ioapic_entry)(int irq, struct IO_APIC_route_entry *,
unsigned int, int,
struct io_apic_irq_attr *);
+
+ /* Set the CPU affinity of a remapped interrupt */
+ int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
+ bool force);
};
extern struct irq_remap_ops intel_irq_remap_ops;
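
With the callback added to struct irq_remap_ops, any other remapping backend can participate in affinity changes by filling in its own member. A hedged sketch with made-up names (not part of this patch, assuming the usual kernel headers are available):

    static int other_ir_set_affinity(struct irq_data *data,
                                     const struct cpumask *mask, bool force)
    {
            /* backend-specific IRTE update and cache flush would go here */
            return 0;
    }

    struct irq_remap_ops other_irq_remap_ops = {
            .set_affinity = other_ir_set_affinity,
    };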