#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;
static int disable_intremap;
static __init int setup_nointremap(char *str)
{
	disable_intremap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);
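/*
 * Per-IRQ bookkeeping: which IOMMU remaps this IRQ, the base index of
 * its interrupt remapping table entry (IRTE), the sub-handle used when
 * several MSI vectors share one IRTE block, and the allocation-order
 * mask. (The struct layout below is reconstructed from how these
 * fields are used throughout this file.)
 */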
struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};
#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
	struct irq_2_iommu *iommu;

	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

	return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	desc = irq_to_desc(irq);
	if (!desc) {
		printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;
	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));

	return desc->irq_2_iommu;
}
#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
#endif
static DEFINE_SPINLOCK(irq_2_ir_lock);
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);
	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}
int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
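/*
 * Allocate 'count' consecutive IRTEs for an IRQ. For multi-vector MSI,
 * count is rounded up to a power of two and the allocation order is
 * recorded in irte_mask, so one invalidation with that mask can later
 * flush the whole block. Returns the base index on success, -1 on
 * failure.
 */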
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
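/*
 * Queue an interrupt entry cache (IEC) invalidation and wait for it to
 * complete. index/mask select either a single IRTE or a power-of-two
 * block of IRTEs.
 */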
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
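/*
 * Return the IRTE base index for this IRQ and report its sub-handle
 * through *sub_handle; useful to callers that share one IRTE block
 * between several MSI vectors.
 */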
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
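/*
 * Rewrite the IRTE for this IRQ. Each 64-bit half of the entry is
 * updated atomically with set_64bit() so the remapping hardware never
 * fetches a torn half-entry; the cacheline and the hardware's interrupt
 * entry cache are then flushed.
 */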
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)&irte->low, irte_modified->low);
	set_64bit((unsigned long *)&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
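/*
 * Look up which IOMMU provides interrupt remapping for a given IOAPIC
 * or PCI device; both return NULL when no remapping unit covers it.
 */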
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;

	return NULL;
}
struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
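/*
 * Clear the whole IRTE block owned by this mapping and invalidate it in
 * hardware. A mapping that only holds a sub-handle doesn't own the
 * block, so it has nothing to clear.
 */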
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit((unsigned long *)&entry->low, 0);
		set_64bit((unsigned long *)&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
int free_irte(int irq)
{
	int rc = 0;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}
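/*
 * Program the source-id check for an MSI/MSI-X capable device. A native
 * PCIe function (or a Root Complex integrated device) is verified by its
 * own requester-id. A device behind a PCIe-to-PCI/PCI-X bridge may
 * present either its own requester-id or the bridge's, so only the bus
 * number range is verified; behind a legacy PCI bridge, the bridge
 * itself is the requester, so its full source-id is verified.
 */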
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (dev->is_pcie || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (bridge->is_pcie) /* this is a PCIe-to-PCI/PCI-X bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
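/*
 * Point the hardware at the remapping table and turn remapping on:
 * program DMAR_IRTA_REG with the table address, size and x2APIC mode,
 * latch it with SIRTP, globally invalidate the interrupt entry cache,
 * and finally set the IRE bit. Each step is polled to completion
 * through the global status register.
 */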
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
int __init intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}
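/*
 * Enable interrupt remapping on every DRHD in several passes: first
 * tear down any remapping/queued-invalidation state left over from the
 * firmware or a previous kernel, then verify EIM support when x2APIC
 * mode is requested, enable queued invalidation everywhere (remapping
 * requires it for interrupt entry cache flushes), and finally allocate
 * and install a remapping table on each IOMMU.
 */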
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/* Clear previous faults. */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);
		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;
	}

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
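/*
 * Record one IOAPIC's bus/devfn under its remapping unit. The DMAR
 * device-scope entry stores a path of (device, function) pairs from the
 * host bridge down to the IOAPIC; since this runs before the PCI core
 * is up, each bridge's secondary bus number is read with direct config
 * accesses to resolve the IOAPIC's final bus number.
 */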
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	int count;
	u8 bus;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_ioapic_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}
/*
 * Find the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}
void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}
int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}