/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>

#include <asm/x86_init.h>
#include <asm/iommu_table.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE 0x10
#define ACPI_IVMD_TYPE_ALL 0x20
#define ACPI_IVMD_TYPE 0x21
#define ACPI_IVMD_TYPE_RANGE 0x22

#define IVHD_DEV_ALL 0x01
#define IVHD_DEV_SELECT 0x02
#define IVHD_DEV_SELECT_RANGE_START 0x03
#define IVHD_DEV_RANGE_END 0x04
#define IVHD_DEV_ALIAS 0x42
#define IVHD_DEV_ALIAS_RANGE 0x43
#define IVHD_DEV_EXT_SELECT 0x46
#define IVHD_DEV_EXT_SELECT_RANGE 0x47
#define IVHD_DEV_SPECIAL 0x48

#define IVHD_SPECIAL_IOAPIC 1
#define IVHD_SPECIAL_HPET 2

#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
#define IVHD_FLAG_PASSPW_EN_MASK 0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
#define IVHD_FLAG_ISOC_EN_MASK 0x08

#define IVMD_FLAG_EXCL_RANGE 0x08
#define IVMD_FLAG_UNITY_MAP 0x01

#define ACPI_DEVFLAG_INITPASS 0x01
#define ACPI_DEVFLAG_EXTINT 0x02
#define ACPI_DEVFLAG_NMI 0x04
#define ACPI_DEVFLAG_SYSMGT1 0x10
#define ACPI_DEVFLAG_SYSMGT2 0x20
#define ACPI_DEVFLAG_LINT0 0x40
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
} __attribute__((packed));

bool amd_iommu_irq_remap __read_mostly;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
u32 amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasids __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
};

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}
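
/*
 * tbl_size() sizes a table of (amd_iommu_last_bdf + 1) fixed-size entries
 * and rounds the result up to a whole number of pages, since the tables
 * below are allocated with page-order granularity.
 */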
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

/* Access to l1 and l2 indexed register spaces */
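/*
 * Both register files are reached through an address/data pair in PCI
 * config space: offsets 0xf8/0xfc for the l1 space (with the l1 bank
 * number in bits 16 and up of the address word) and 0xf0/0xf4 for the l2
 * space. Setting bit 31 (l1) or bit 8 (l2) in the address word turns the
 * following data access into a write.
 */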

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
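/*
 * The register takes the physical base address of the device table in its
 * upper bits and the table size, encoded as (size in 4K units) - 1, in its
 * low bits.
 */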
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address)
{
	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
			address);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
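 * from the two topmost bits of its type byte: entries come in sizes of
 * 4, 8, 16 or 32 bytes, i.e. 0x04 << (type >> 6).
 */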
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks whether there is a higher device id defined in the
 * ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup tables and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
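/*
 * The buffer's address is only handed to the hardware later, in
 * iommu_enable_command_buffer(); until that happens the
 * CMD_BUFFER_UNINITIALIZED flag is kept ORed into cmd_buf_size.
 */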
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(CMD_BUFFER_SIZE));

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

	return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(PPR_LOG_SIZE));

	if (iommu->ppr_log == NULL)
		return NULL;

	return iommu->ppr_log;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
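/*
 * A device table entry consists of four 64-bit words; bits 7-6 of the bit
 * number select the word, bits 5-0 the bit position inside that word.
 */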
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
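
/*
 * Workaround for erratum 63: if the SYSMGT field of a device table entry
 * decodes to 01b, the IW bit of that entry must be set as well.
 */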
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

static int add_special_device(u8 type, u8 id, u16 devid)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
		return -EINVAL;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id    = id;
	entry->devid = devid;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else
		list = &hpet_map;

	list_add_tail(&entry->list, list);

	return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it.
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We can only configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here.
		 */
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			ret = add_special_device(type, handle, devid);
			if (ret)
				return ret;
			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u32 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	init_iommu_devices(iommu);

	return 0;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;
	}

	return 0;
}

static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;

	iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid),
					  iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 pasids;
		u64 shift;

		shift   = iommu->features & FEATURE_PASID_MASK;
		shift >>= FEATURE_PASID_SHIFT;
		pasids  = (1 << shift);

		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR)) {
		iommu->ppr_log = alloc_ppr_log(iommu);
		if (!iommu->ppr_log)
			return -ENOMEM;
	}

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
							PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi: Extended features: ");
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}
			pr_cont("\n");
		}
	}
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	ret = amd_iommu_init_devices();

	print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * device.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu->dev);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definition */
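/*
 * In an IVMD entry bit 0 marks a unity mapping and the read/write
 * permission bits sit directly above it, so (m->flags >> 1) below yields
 * the protection value for the range.
 */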
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);
	}
}
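
/*
 * Note the ordering above: each IOMMU is disabled first, then the device
 * table, command buffer and event buffer are programmed, and only then is
 * the IOMMU re-enabled and its caches flushed.
 */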

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();
	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_on_init_error(void)
{
	if (amd_iommu_irq_cache) {
		kmem_cache_destroy(amd_iommu_irq_cache);
		amd_iommu_irq_cache = NULL;
	}

	amd_iommu_uninit_devices();

	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

	free_unity_maps();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();
#endif
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for the AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Based on this information the sizes of the data structures
 *		that need to be allocated are determined.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs.
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;
	int i, ret = 0;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/* init the device table */
	init_device_table();

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized,
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
				MAX_IRQS_PER_TABLE * sizeof(u32),
				IRQ_TABLE_ALIGNMENT,
				0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

out:
	/* Don't leak any ACPI memory */
	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
	ivrs_base = NULL;

	return ret;
}

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return false;
	}

	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	if (!disable_irq_remap)
		amd_iommu_irq_remap = true;

	return true;
}

static int amd_iommu_init_dma(void)
{
	int ret;

	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();

	if (ret)
		return ret;

	amd_iommu_init_api();

	amd_iommu_init_notifier();

	return 0;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
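
/*
 * state_next() advances init_state one step along the happy path:
 * START_STATE -> IVRS_DETECTED -> ACPI_FINISHED -> ENABLED -> PCI_INIT ->
 * INTERRUPTS_EN -> DMA_OPS -> INITIALIZED. A failure in any step leads to
 * IOMMU_INIT_ERROR, and a missing IVRS table to IOMMU_NOT_FOUND; both are
 * terminal states for iommu_go_to_state().
 */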
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		register_syscore_ops(&amd_iommu_syscore_ops);
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = 0;

	while (init_state != state) {
		ret = state_next();
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR)
			break;
	}

	return ret;
}

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		disable_iommus();
		free_on_init_error();
	}

	return ret;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just checks whether there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/

int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (amd_iommu_disabled)
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 0;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line options
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);