#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include <acpi/acpi.h>
#include <asm/intr_remapping.h>
#include <asm/pci-direct.h>

#include "intr_remapping.h"

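/*
 * Bookkeeping parsed from the DMAR device-scope entries: which IOMMU
 * remaps interrupts for each IO-APIC and HPET block, and the PCI
 * bus/devfn those devices use as the source-id of their requests.
 */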
struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

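/*
 * IRTA_REG bit 11 is the extended interrupt mode (x2apic) enable bit.
 * In xapic mode the 8-bit destination APIC ID sits in bits 15:8 of the
 * IRTE dest_id field; in x2apic mode the full 32-bit ID is used as-is.
 */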
#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((x2apic_mode) ? dest : dest << 8)

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	return cfg ? &cfg->irq_2_iommu : NULL;
}

int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

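/*
 * Allocate 'count' consecutive IRTEs for an irq. A count > 1 is only
 * meaningful for multiple MSI vectors and is rounded up to a power of
 * two so it can be expressed as an invalidation-handle mask. The usual
 * caller sequence (see intel_setup_ioapic_entry() below) is:
 *
 *	index = alloc_irte(iommu, irq, 1);
 *	prepare_irte(&irte, vector, dest);
 *	set_ioapic_sid(&irte, ioapic_id);	(or set_msi_sid()/set_hpet_sid())
 *	modify_irte(irq, &irte);
 */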
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

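/*
 * Flush the interrupt entry cache (IEC) for the 2^mask entries
 * starting at 'index', using a queued-invalidation descriptor; a mask
 * of 0 invalidates a single entry.
 */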
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

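/*
 * Lookup helpers: map an HPET block, IO-APIC or PCI device to the
 * IOMMU that remaps its interrupts, as recorded at DMAR parse time.
 */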
struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

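/*
 * For MSI the source-id to verify depends on the topology: a PCIe or
 * Root-Complex-integrated device issues requests with its own
 * requester-id, while requests from a device behind a PCIe-to-PCI(-X)
 * bridge may carry the bridge's secondary bus in the requester-id, so
 * only the bus number can be verified in that case.
 */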
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				     (bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				     (bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}

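/*
 * Hardware enable sequence: program the remapping table address (and
 * extended interrupt mode) into IRTA, latch it with GCMD.SIRTP, do a
 * global invalidation of the interrupt entry cache, and only then set
 * GCMD.IRE to start remapping.
 */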
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);

	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);	/* GSTS is a 32-bit register */
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

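/*
 * ACPI can mark the DMAR table with a flag asking the OS not to use
 * x2apic (opt-out); honour it unless the user overrides it with
 * intremap=no_x2apic_optout.
 */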
static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static int __init intel_intr_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_intremap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

static int __init intel_enable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	int eim = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		WARN(!eim, KERN_WARNING
			   "Your BIOS is broken and requested that x2apic be disabled\n"
			   "This will leave your machine vulnerable to irq-injection attacks\n"
			   "Use 'intremap=no_x2apic_optout' to override BIOS request\n");
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * Check for Interrupt-remapping support.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;
	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

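/*
 * A DMAR device-scope entry records the path of PCI bridges from the
 * host down to an IO-APIC or HPET block. Walk that path, reading each
 * bridge's secondary bus number straight from PCI config space, to
 * compute the bus/devfn the device uses as its request source-id.
 */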
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly, since the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly, since the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Find the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

int __init ir_dev_scope_init(void)
{
	if (!intr_remapping_enabled)
		return 0;

	return dmar_dev_scope_init();
}
rootfs_initcall(ir_dev_scope_init);

static void disable_intr_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_intr_remapping(iommu);
	}
}

static int reenable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_intr_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

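/*
 * Build an IO-APIC RTE in remappable format for the irq: allocate an
 * IRTE, fill in the real vector and destination there, and point the
 * RTE at it (format = 1, 16-bit handle split across the index and
 * index2 fields). The RTE's vector field carries the IO-APIC pin
 * number rather than the CPU vector.
 */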
static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		return -ENODEV;
	}

	entry = (struct IR_IO_APIC_route_entry *)route_entry;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
		return -ENOMEM;
	}

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	memset(entry, 0, sizeof(*entry));

	entry->index2	= (index >> 15) & 0x1;
	entry->zero	= 0;
	entry->format	= 1;
	entry->index	= (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with virtual vector.
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector	= attr->ioapic_pin;
	entry->mask	= 0;			/* enable IRQ */
	entry->trigger	= attr->trigger;
	entry->polarity	= attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of
 * the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information) by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu will come from
 * the interrupt-remapping table entry.
 *
 * As migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irqs in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	if (assign_irq_vector(irq, cfg, mask))
		return -EBUSY;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}

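/*
 * Entry points used by the generic irq-remapping layer.
 */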
struct irq_remap_ops intel_irq_remap_ops = {
	.supported		= intel_intr_remapping_supported,
	.hardware_init		= dmar_table_init,
	.hardware_enable	= intel_enable_intr_remapping,
	.hardware_disable	= disable_intr_remapping,
	.hardware_reenable	= reenable_intr_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
};