#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;
static int no_x2apic_optout;
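
/*
 * Boot-time control: "nointremap" disables interrupt remapping entirely,
 * while "intremap=" accepts a comma-separated list of on/off/nosid/
 * no_x2apic_optout options, parsed below.
 */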
static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);
static __init int setup_intremap(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "on", 2))
			disable_intremap = 0;
		else if (!strncmp(str, "off", 3))
			disable_intremap = 1;
		else if (!strncmp(str, "nosid", 5))
			disable_sourceid_checking = 1;
		else if (!strncmp(str, "no_x2apic_optout", 16))
			no_x2apic_optout = 1;

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}

	return 0;
}
early_param("intremap", setup_intremap);
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
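
/*
 * The per-IRQ remapping state (owning IOMMU, IRTE index, sub-handle and
 * allocation mask) is embedded in the irq's chip data (struct irq_cfg),
 * so looking it up is just a chip-data dereference.
 */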
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	return cfg ? &cfg->irq_2_iommu : NULL;
}
int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
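
/*
 * Allocate 'count' contiguous IRTEs for an irq. 'count' is rounded up to
 * a power of two so that a multiple-MSI block can be addressed with one
 * base index plus a sub-handle; the resulting mask (log2 of the rounded
 * count) must fit within the IOMMU's maximum invalidation handle mask.
 */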
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
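
/*
 * Invalidate the Interrupt Entry Cache for 'mask' entries starting at
 * 'index' by submitting a queued-invalidation IEC descriptor, and wait
 * for the invalidation to complete.
 */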
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
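
/*
 * Rewrite an IRTE in place. Each 64-bit half is stored atomically, the
 * line is flushed for non-coherent IOMMUs, and the Interrupt Entry Cache
 * is invalidated so the hardware picks up the new entry.
 */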
int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
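
/*
 * Clear the whole block of IRTEs owned by this irq. Sub-handle users
 * share entries allocated by sub-handle 0, so only the owner actually
 * zeroes the entries and flushes the Interrupt Entry Cache.
 */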
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}
int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}
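
/*
 * For MSI the source-id depends on the topology: a native PCIe device
 * (or a Root Complex integrated device) is verified against its full
 * requester-id, a device behind a PCIe-to-PCI/PCI-X bridge only against
 * a bus-number range (such bridges may or may not take ownership of the
 * transaction), and a device behind a legacy PCI bridge against the
 * bridge's own requester-id.
 */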
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
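
/*
 * Program the remapping table address and mode into the IRTA register,
 * latch it with the SIRTP command, globally invalidate the Interrupt
 * Entry Cache, and finally set the IRE bit to turn remapping on.
 */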
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
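
/*
 * Allocate the software ir_table descriptor and the hardware IRTE pages
 * (on the IOMMU's home node), then hand the table to the hardware.
 */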
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
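
/*
 * The DMAR table's flags can carry a BIOS request to opt out of x2apic;
 * "intremap=no_x2apic_optout" on the command line overrides it.
 */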
static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}
int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}
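
/*
 * Bring-up order: validate the IO-APIC/HPET scopes, pick xapic vs x2apic
 * (EIM) mode, quiesce any remapping or queued invalidation left enabled
 * by the BIOS or a previous kernel, verify EIM support, enable queued
 * invalidation on every DRHD and only then set up remapping on each.
 */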
int __init enable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	int eim = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		WARN(!eim, KERN_WARNING
			"Your BIOS is broken and requested that x2apic be disabled\n"
			"This will leave your machine vulnerable to irq-injection attacks\n"
			"Use 'intremap=no_x2apic_optout' to override BIOS request\n");
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;
	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
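
/*
 * Walk an ACPI device-scope PCI path down from the scope's start bus,
 * reading each bridge's secondary bus number with early PCI config
 * accesses, to record the bus/devfn that this device's interrupt
 * requests will carry as their source-id.
 */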
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_hpet[ir_hpet_num].bus   = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
	ir_hpet_num++;
}
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus   = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
	ir_ioapic_num++;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}
/*
 * Find the association between the IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}
int __init ir_dev_scope_init(void)
{
	if (!intr_remapping_enabled)
		return 0;

	return dmar_dev_scope_init();
}
rootfs_initcall(ir_dev_scope_init);
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}
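
/*
 * Counterpart of disable_intr_remapping() for the suspend/resume path:
 * re-enable queued invalidation where it was initialized and reprogram
 * each IOMMU's remapping table in the requested mode.
 */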
int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}