/*
 * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
 * initial domain support. We also handle the DSDT _PRT callbacks for GSIs
 * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
 * concept of GSIs). Under PV we hook under the pcibios API for IRQs and
 * 0xcf8 PCI configuration read/write.
 *
 *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 *           Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *           Stefano Stabellini <stefano.stabellini@eu.citrix.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <linux/io.h>
#include <asm/io_apic.h>
#include <asm/pci_x86.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
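
/*
 * PV domU INTx: pcifront stores the PIRQ number in the device's
 * PCI_INTERRUPT_LINE register, so "enabling" the interrupt just means
 * binding that PIRQ to an event-channel backed Linux IRQ. E.g. a device
 * whose interrupt line reads back as 24 would (roughly) end up as
 * xen_bind_pirq_gsi_to_irq(24, 24, 1, "pcifront").
 */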
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
	int rc;
	int share = 1;
	int pirq;
	u8 gsi;

	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
	if (rc < 0) {
		dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
			 rc);
		return rc;
	}
	/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line. */
	pirq = gsi;

	/* Legacy (ISA) IRQs are edge triggered and must not be shared. */
	if (gsi < NR_IRQS_LEGACY)
		share = 0;

	rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
	if (rc < 0) {
		dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
			 gsi, pirq, rc);
		return rc;
	}

	dev->irq = rc;
	dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
	return 0;
}
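
/*
 * Ask Xen to map a GSI to a PIRQ, then bind the result to a Linux IRQ.
 * The PHYSDEVOP_map_pirq request built below is, for GSIs, roughly:
 *
 *	struct physdev_map_pirq map_irq = {
 *		.domid = DOMID_SELF,
 *		.type  = MAP_PIRQ_TYPE_GSI,
 *		.index = gsi,		-- which GSI to map
 *		.pirq  = pirq,		-- -1 lets Xen pick the PIRQ
 *	};
 *
 * On success Xen fills in map_irq.pirq with the PIRQ actually used.
 */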
static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
			     bool set_pirq)
{
	int rc, pirq = -1, irq = -1;
	struct physdev_map_pirq map_irq;
	int shareable = 0;
	char *name;

	/* If the GSI is already bound, reuse the existing IRQ. */
	irq = xen_irq_from_gsi(gsi);
	if (irq > 0)
		return irq;

	if (set_pirq)
		pirq = gsi;

	map_irq.domid = DOMID_SELF;
	map_irq.type = MAP_PIRQ_TYPE_GSI;
	map_irq.index = gsi;
	map_irq.pirq = pirq;

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (rc) {
		printk(KERN_WARNING "xen map irq failed %d\n", rc);
		return -1;
	}

	if (triggering == ACPI_EDGE_SENSITIVE) {
		shareable = 0;
		name = "ioapic-edge";
	} else {
		shareable = 1;
		name = "ioapic-level";
	}

	if (gsi_override >= 0)
		gsi = gsi_override;

	irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
	if (irq < 0)
		goto out;

	printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n",
	       map_irq.pirq, irq, gsi);
out:
	return irq;
}

static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
				     int trigger, int polarity)
{
	if (!xen_hvm_domain())
		return -1;

	return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
				 false /* no mapping of GSI to PIRQ */);
}

#ifdef CONFIG_XEN_DOM0
static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
{
	int rc, irq;
	struct physdev_setup_gsi setup_gsi;

	if (!xen_pv_domain())
		return -1;

	printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
	       gsi, triggering, polarity);

	irq = xen_register_pirq(gsi, gsi_override, triggering, true);

	setup_gsi.gsi = gsi;
	setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
	setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);

	rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (rc == -EEXIST)
		printk(KERN_INFO "Already set up GSI %d\n", gsi);
	else if (rc)
		printk(KERN_ERR "Failed to set up GSI %d, err_code: %d\n",
		       gsi, rc);

	return irq;
}

static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
				 int trigger, int polarity)
{
	return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
}
#endif
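
/*
 * MSI/MSI-X support. Three flavors, selected at boot:
 *  - xen_setup_msi_irqs:	  PV domU, PIRQs allocated by pcifront
 *  - xen_hvm_setup_msi_irqs:	  HVM, PIRQ smuggled through the MSI message
 *  - xen_initdom_setup_msi_irqs: dom0, mapped via PHYSDEVOP_map_pirq
 */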
#if defined(CONFIG_PCI_MSI)
#include <linux/msi.h>
#include <asm/msidef.h>

struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);
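
/*
 * pcifront registers its enable/disable callbacks in this ops struct at
 * load time; the xen_pci_frontend_enable_msi{,x}() helpers used below
 * indirect through it and fail if pcifront never showed up.
 */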
static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, ret, i;
	struct msi_desc *msidesc;
	int *v;

	/* Multiple MSI vectors are not supported here; a positive return
	 * tells the MSI core to retry with fewer vectors. */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
	if (!v)
		return -ENOMEM;

	if (type == PCI_CAP_ID_MSIX)
		ret = xen_pci_frontend_enable_msix(dev, v, nvec);
	else
		ret = xen_pci_frontend_enable_msi(dev, v);
	if (ret)
		goto error;

	i = 0;
	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
					       (type == PCI_CAP_ID_MSIX) ?
					       "pcifront-msi-x" :
					       "pcifront-msi",
					       DOMID_SELF);
		if (irq < 0) {
			ret = irq;
			goto free;
		}
		i++;
	}
	kfree(v);
	return 0;

error:
	dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
free:
	kfree(v);
	return ret;
}

#define XEN_PIRQ_MSI_DATA	(MSI_DATA_TRIGGER_EDGE | \
	MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))
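
/*
 * The data word above, together with vector 0 in the composed message,
 * acts as a marker the hypervisor recognizes: "no real vector, deliver
 * via a PIRQ instead", the PIRQ number riding in the destination-ID
 * bits. xen_hvm_setup_msi_irqs() reads a message back to decide whether
 * a PIRQ was already configured (e.g. across save/restore).
 */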
static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
				struct msi_msg *msg)
{
	/* We set vector == 0 to tell the hypervisor we don't care about it,
	 * but we want a pirq setup instead.
	 * We use the dest_id field to pass the pirq that we want. */
	msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
	msg->address_lo =
		MSI_ADDR_BASE_LO |
		MSI_ADDR_DEST_MODE_PHYSICAL |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID(pirq);
	msg->data = XEN_PIRQ_MSI_DATA;
}

static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, pirq;
	struct msi_desc *msidesc;
	struct msi_msg msg;

	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	list_for_each_entry(msidesc, &dev->msi_list, list) {
		/* Recover a previously allocated PIRQ from the MSI message
		 * if there is one; otherwise allocate a fresh one. */
		__read_msi_msg(msidesc, &msg);
		pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
			((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
		if (msg.data != XEN_PIRQ_MSI_DATA ||
		    xen_irq_from_pirq(pirq) < 0) {
			pirq = xen_allocate_pirq_msi(dev, msidesc);
			if (pirq < 0) {
				irq = -ENODEV;
				goto error;
			}
			xen_msi_compose_msg(dev, pirq, &msg);
			__write_msi_msg(msidesc, &msg);
			dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
		} else {
			dev_dbg(&dev->dev,
				"xen: msi already bound to pirq=%d\n", pirq);
		}
		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
					       (type == PCI_CAP_ID_MSIX) ?
					       "msi-x" : "msi",
					       DOMID_SELF);
		if (irq < 0)
			goto error;
		dev_dbg(&dev->dev,
			"xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
	}
	return 0;

error:
	dev_err(&dev->dev, "Xen PCI: failed to set up MSI: %d\n", irq);
	return irq;
}

#ifdef CONFIG_XEN_DOM0
static bool __read_mostly pci_seg_supported = true;
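
/*
 * Hypervisors that predate multi-segment PCI support reject
 * MAP_PIRQ_TYPE_MSI_SEG (which carries the PCI segment in the upper bits
 * of .bus) with -EINVAL. In that case, and only for segment 0, fall back
 * to plain MAP_PIRQ_TYPE_MSI and remember the limitation here.
 */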
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int ret = 0;
	struct msi_desc *msidesc;

	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	list_for_each_entry(msidesc, &dev->msi_list, list) {
		struct physdev_map_pirq map_irq;
		domid_t domid;

		domid = ret = xen_find_device_domain_owner(dev);
		/* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
		 * hence check ret value for < 0. */
		if (ret < 0)
			domid = DOMID_SELF;

		memset(&map_irq, 0, sizeof(map_irq));
		map_irq.domid = domid;
		map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
		map_irq.index = -1;
		map_irq.pirq = -1;
		map_irq.bus = dev->bus->number |
			      (pci_domain_nr(dev->bus) << 16);
		map_irq.devfn = dev->devfn;

		if (type == PCI_CAP_ID_MSIX) {
			int pos;
			u32 table_offset, bir;

			/* Tell Xen where this device's MSI-X table lives. */
			pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
			pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
					      &table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			map_irq.table_base = pci_resource_start(dev, bir);
			map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
		}

		ret = -EINVAL;
		if (pci_seg_supported)
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
		if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
			map_irq.type = MAP_PIRQ_TYPE_MSI;
			map_irq.index = -1;
			map_irq.pirq = -1;
			map_irq.bus = dev->bus->number;
			ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
						    &map_irq);
			if (ret != -EINVAL)
				pci_seg_supported = false;
		}
		if (ret) {
			dev_warn(&dev->dev, "xen map irq failed %d for domain %d\n",
				 ret, domid);
			goto out;
		}

		ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
					       map_irq.pirq, map_irq.index,
					       (type == PCI_CAP_ID_MSIX) ?
					       "msi-x" : "msi",
					       domid);
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	return ret;
}

static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq)
{
	int ret = 0;

	if (pci_seg_supported) {
		struct physdev_pci_device restore_ext;

		restore_ext.seg = pci_domain_nr(dev->bus);
		restore_ext.bus = dev->bus->number;
		restore_ext.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
					    &restore_ext);
		if (ret == -ENOSYS)
			pci_seg_supported = false;
		WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
	}
	if (!pci_seg_supported) {
		struct physdev_restore_msi restore;

		restore.bus = dev->bus->number;
		restore.devfn = dev->devfn;
		ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
		WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
	}
}
#endif
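
/*
 * Teardown is per-device, not per-vector: tell pcifront to disable the
 * MSI/MSI-X capability, then let the generic code free each Linux IRQ
 * through the x86_msi.teardown_msi_irq hook (xen_destroy_irq below).
 */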
static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *msidesc;

	msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
	if (msidesc->msi_attrib.is_msix)
		xen_pci_frontend_disable_msix(dev);
	else
		xen_pci_frontend_disable_msi(dev);

	/* Free the IRQs and the msidesc using the generic code. */
	default_teardown_msi_irqs(dev);
}

static void xen_teardown_msi_irq(unsigned int irq)
{
	xen_destroy_irq(irq);
}
#endif

int __init pci_xen_init(void)
{
	if (!xen_pv_domain() || xen_initial_domain())
		return -ENODEV;

	printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");

	pcibios_set_cache_line_size();

	pcibios_enable_irq = xen_pcifront_enable_irq;
	pcibios_disable_irq = NULL;

#ifdef CONFIG_ACPI
	/* Keep ACPI out of the picture */
	acpi_noirq = 1;
#endif

#ifdef CONFIG_PCI_MSI
	x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
	x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
#endif
	return 0;
}
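
/*
 * HVM guests keep the emulated IO-APIC/ACPI delivery model; only GSI
 * registration (and, given XENFEAT_hvm_pirqs plus a vector callback,
 * MSI setup) is rerouted through Xen below.
 */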
int __init pci_xen_hvm_init(void)
{
	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
		return 0;

#ifdef CONFIG_ACPI
	/*
	 * We don't want to change the actual ACPI delivery model,
	 * just how GSIs get registered.
	 */
	__acpi_register_gsi = acpi_register_gsi_xen_hvm;
#endif

#ifdef CONFIG_PCI_MSI
	x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
#endif
	return 0;
}

#ifdef CONFIG_XEN_DOM0
static __init void xen_setup_acpi_sci(void)
{
	int rc;
	int trigger, polarity;
	int gsi = acpi_sci_override_gsi;
	int irq = -1;
	int gsi_override = -1;

	if (!gsi)
		return;

	rc = acpi_get_override_irq(gsi, &trigger, &polarity);
	if (rc) {
		printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
		       " sci, rc=%d\n", rc);
		return;
	}
	trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
	polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;

	printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
	       "polarity=%d\n", gsi, trigger, polarity);

	/* Before we bind the GSI to a Linux IRQ, check whether
	 * we need to override it with the bus_irq (IRQ) value. Usually for
	 * IRQs below NR_IRQS_LEGACY the IRQ and the GSI are the same:
	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
	 * but there are oddballs where IRQ != GSI:
	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
	 * which ends up being gsi_to_irq[9] == 20. acpi_gsi_to_irq returns
	 * that IRQ when the ACPI interpreter starts up, and the interpreter
	 * would keel over since IRQ 9 has not been set up (we set up IRQ 20
	 * for it instead).
	 */
	if (acpi_gsi_to_irq(gsi, &irq) == 0) {
		/* Use the provided value if it's valid. */
		if (irq >= 0)
			gsi_override = irq;
	}

	gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
	printk(KERN_INFO "xen: acpi sci %d\n", gsi);
}
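
/*
 * Dom0 boot-time setup: take over the MSI ops, register the ACPI SCI,
 * and pre-register every legacy IRQ with the hypervisor so PIRQ == GSI
 * holds for them (the loop below maps GSI to PIRQ 1:1).
 */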
int __init pci_xen_initial_domain(void)
{
	int irq;

#ifdef CONFIG_PCI_MSI
	x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
	x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
#endif
	xen_setup_acpi_sci();
	__acpi_register_gsi = acpi_register_gsi_xen;

	/* Pre-allocate legacy IRQs */
	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
		int trigger, polarity;

		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
			continue;

		xen_register_pirq(irq, -1 /* no GSI override */,
			trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
			true /* Map GSI to PIRQ */);
	}
	if (nr_ioapics == 0) {
		/* No IO-APIC: bind the legacy PIC IRQs 1:1. */
		for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
			xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
	}
	return 0;
}
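
/*
 * Device-ownership registry: dom0 records which domain a PCI device has
 * been assigned to (the backend registers this when handing a device to
 * a guest), so xen_initdom_setup_msi_irqs() can issue PHYSDEVOP_map_pirq
 * on behalf of the owning domain instead of DOMID_SELF.
 */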
struct xen_device_domain_owner {
	domid_t domain;
	struct pci_dev *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);

/* Caller must hold dev_domain_list_spinlock. */
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	list_for_each_entry(owner, &dev_domain_list, list) {
		if (owner->dev == dev)
			return owner;
	}
	return NULL;
}

int xen_find_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;
	int domain = -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (owner)
		domain = owner->domain;
	spin_unlock(&dev_domain_list_spinlock);
	return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);

int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
	struct xen_device_domain_owner *owner;

	owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
	if (!owner)
		return -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	if (find_device(dev)) {
		spin_unlock(&dev_domain_list_spinlock);
		kfree(owner);
		return -EEXIST;
	}
	owner->domain = domain;
	owner->dev = dev;
	list_add_tail(&owner->list, &dev_domain_list);
	spin_unlock(&dev_domain_list_spinlock);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);

int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (!owner) {
		spin_unlock(&dev_domain_list_spinlock);
		return -ENODEV;
	}
	list_del(&owner->list);
	spin_unlock(&dev_domain_list_spinlock);
	kfree(owner);
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif