1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
5 * Leo Duran <leo.duran@amd.com>
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
9 #define dev_fmt(fmt) pr_fmt(fmt)
11 #include <linux/pci.h>
12 #include <linux/acpi.h>
13 #include <linux/list.h>
14 #include <linux/bitmap.h>
15 #include <linux/slab.h>
16 #include <linux/syscore_ops.h>
17 #include <linux/interrupt.h>
18 #include <linux/msi.h>
19 #include <linux/irq.h>
20 #include <linux/amd-iommu.h>
21 #include <linux/export.h>
22 #include <linux/kmemleak.h>
23 #include <linux/cc_platform.h>
24 #include <linux/iopoll.h>
25 #include <asm/pci-direct.h>
26 #include <asm/iommu.h>
29 #include <asm/x86_init.h>
30 #include <asm/io_apic.h>
31 #include <asm/irq_remapping.h>
32 #include <asm/set_memory.h>
34 #include <linux/crash_dump.h>
36 #include "amd_iommu.h"
37 #include "../irq_remapping.h"
40 * definitions for the ACPI scanning code
42 #define IVRS_HEADER_LENGTH 48
44 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
45 #define ACPI_IVMD_TYPE_ALL 0x20
46 #define ACPI_IVMD_TYPE 0x21
47 #define ACPI_IVMD_TYPE_RANGE 0x22
49 #define IVHD_DEV_ALL 0x01
50 #define IVHD_DEV_SELECT 0x02
51 #define IVHD_DEV_SELECT_RANGE_START 0x03
52 #define IVHD_DEV_RANGE_END 0x04
53 #define IVHD_DEV_ALIAS 0x42
54 #define IVHD_DEV_ALIAS_RANGE 0x43
55 #define IVHD_DEV_EXT_SELECT 0x46
56 #define IVHD_DEV_EXT_SELECT_RANGE 0x47
57 #define IVHD_DEV_SPECIAL 0x48
58 #define IVHD_DEV_ACPI_HID 0xf0
60 #define UID_NOT_PRESENT 0
61 #define UID_IS_INTEGER 1
62 #define UID_IS_CHARACTER 2
64 #define IVHD_SPECIAL_IOAPIC 1
65 #define IVHD_SPECIAL_HPET 2
67 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01
68 #define IVHD_FLAG_PASSPW_EN_MASK 0x02
69 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
70 #define IVHD_FLAG_ISOC_EN_MASK 0x08
72 #define IVMD_FLAG_EXCL_RANGE 0x08
73 #define IVMD_FLAG_IW 0x04
74 #define IVMD_FLAG_IR 0x02
75 #define IVMD_FLAG_UNITY_MAP 0x01
77 #define ACPI_DEVFLAG_INITPASS 0x01
78 #define ACPI_DEVFLAG_EXTINT 0x02
79 #define ACPI_DEVFLAG_NMI 0x04
80 #define ACPI_DEVFLAG_SYSMGT1 0x10
81 #define ACPI_DEVFLAG_SYSMGT2 0x20
82 #define ACPI_DEVFLAG_LINT0 0x40
83 #define ACPI_DEVFLAG_LINT1 0x80
84 #define ACPI_DEVFLAG_ATSDIS 0x10000000
86 #define LOOP_TIMEOUT 2000000
88 #define IVRS_GET_SBDF_ID(seg, bus, dev, fn) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
89 | ((dev & 0x1f) << 3) | (fn & 0x7))
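/*
 * Worked example (illustrative, not from the original source): for
 * seg 0x0001, bus 0x40, dev 0x1f, fn 0x7:
 *	IVRS_GET_SBDF_ID(0x0001, 0x40, 0x1f, 0x7)
 *	  = (0x0001 << 16) | (0x40 << 8) | (0x1f << 3) | 0x7
 *	  = 0x00010000 | 0x4000 | 0xf8 | 0x7
 *	  = 0x000140ff
 */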
92 * ACPI table definitions
94 * These data structures are laid over the table to parse the important values
99 * structure describing one IOMMU in the ACPI table. Typically followed by one
100 * or more ivhd_entries.
113 /* Following only valid on IVHD type 11h and 40h */
114 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
116 } __attribute__((packed));
119 * A device entry describing which devices a specific IOMMU translates and
120 * which requestor ids they use.
126 struct_group(ext_hid,
134 } __attribute__((packed));
137 * An AMD IOMMU memory definition structure. It defines things like exclusion
138 * ranges for devices and regions that should be unity mapped.
150 } __attribute__((packed));
153 bool amd_iommu_irq_remap __read_mostly;
155 enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
156 /* Guest page table level */
157 int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
159 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
160 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
162 static bool amd_iommu_detected;
163 static bool amd_iommu_disabled __initdata;
164 static bool amd_iommu_force_enable __initdata;
165 static int amd_iommu_target_ivhd_type;
167 /* Global EFR and EFR2 registers */
171 /* Whether SNP is enabled on the system */
172 bool amd_iommu_snp_en;
173 EXPORT_SYMBOL(amd_iommu_snp_en);
175 LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */
176 LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
179 /* Array to assign indices to IOMMUs */
180 struct amd_iommu *amd_iommus[MAX_IOMMUS];
182 /* Number of IOMMUs present in the system */
183 static int amd_iommus_present;
185 /* Whether the IOMMUs have a non-present cache */
186 bool amd_iommu_np_cache __read_mostly;
187 bool amd_iommu_iotlb_sup __read_mostly = true;
189 u32 amd_iommu_max_pasid __read_mostly = ~0;
191 bool amd_iommu_v2_present __read_mostly;
192 static bool amd_iommu_pc_present __read_mostly;
193 bool amdr_ivrs_remap_support __read_mostly;
195 bool amd_iommu_force_isolation __read_mostly;
198 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
199 * to know which ones are already in use.
201 unsigned long *amd_iommu_pd_alloc_bitmap;
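/*
 * Illustrative sketch (not part of this file): a protection domain id can
 * be handed out by scanning this bitmap for a clear bit and marking it
 * used, e.g. with the generic bitmap helpers:
 *
 *	unsigned long id;
 *
 *	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
 *	if (id < MAX_DOMAIN_ID)
 *		__set_bit(id, amd_iommu_pd_alloc_bitmap);
 *
 * The actual allocator lives elsewhere in the driver; this only shows how
 * the bitmap encodes which domain ids are in use.
 */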
203 enum iommu_init_state {
213 IOMMU_CMDLINE_DISABLED,
216 /* Early ioapic and hpet maps from kernel command line */
217 #define EARLY_MAP_SIZE 4
218 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
219 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
220 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
222 static int __initdata early_ioapic_map_size;
223 static int __initdata early_hpet_map_size;
224 static int __initdata early_acpihid_map_size;
226 static bool __initdata cmdline_maps;
228 static enum iommu_init_state init_state = IOMMU_START_STATE;
230 static int amd_iommu_enable_interrupts(void);
231 static int __init iommu_go_to_state(enum iommu_init_state state);
232 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
234 static bool amd_iommu_pre_enabled = true;
236 static u32 amd_iommu_ivinfo __initdata;
238 bool translation_pre_enabled(struct amd_iommu *iommu)
240 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
243 static void clear_translation_pre_enabled(struct amd_iommu *iommu)
245 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
248 static void init_translation_status(struct amd_iommu *iommu)
252 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
253 if (ctrl & (1<<CONTROL_IOMMU_EN))
254 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
257 static inline unsigned long tbl_size(int entry_size, int last_bdf)
259 unsigned shift = PAGE_SHIFT +
260 get_order((last_bdf + 1) * entry_size);
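/*
 * Worked example (illustrative): for the device table, entry_size is
 * DEV_TABLE_ENTRY_SIZE (32 bytes). With last_bdf = 0xffff this gives
 * (0xffff + 1) * 32 = 2 MiB, get_order(2 MiB) = 9, so the resulting
 * size is 1UL << (PAGE_SHIFT + 9) = 2 MiB on 4K-page systems.
 */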
265 int amd_iommu_get_num_iommus(void)
267 return amd_iommus_present;
271 * Iterate through all the IOMMUs to get common EFR
272 * masks among all IOMMUs and warn if any inconsistency is found.
274 static void get_global_efr(void)
276 struct amd_iommu *iommu;
278 for_each_iommu(iommu) {
279 u64 tmp = iommu->features;
280 u64 tmp2 = iommu->features2;
282 if (list_is_first(&iommu->list, &amd_iommu_list)) {
284 amd_iommu_efr2 = tmp2;
288 if (amd_iommu_efr == tmp &&
289 amd_iommu_efr2 == tmp2)
293 "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n",
294 tmp, tmp2, amd_iommu_efr, amd_iommu_efr2,
295 iommu->index, iommu->pci_seg->id,
296 PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid),
297 PCI_FUNC(iommu->devid));
299 amd_iommu_efr &= tmp;
300 amd_iommu_efr2 &= tmp2;
303 pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
306 static bool check_feature_on_all_iommus(u64 mask)
308 return !!(amd_iommu_efr & mask);
311 static inline int check_feature_gpt_level(void)
313 return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
317 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
318 * Default to IVHD EFR since it is available sooner
319 * (i.e. before PCI init).
321 static void __init early_iommu_features_init(struct amd_iommu *iommu,
322 struct ivhd_header *h)
324 if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) {
325 iommu->features = h->efr_reg;
326 iommu->features2 = h->efr_reg2;
328 if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP)
329 amdr_ivrs_remap_support = true;
332 /* Access to l1 and l2 indexed register spaces */
334 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
338 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
339 pci_read_config_dword(iommu->dev, 0xfc, &val);
343 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
345 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
346 pci_write_config_dword(iommu->dev, 0xfc, val);
347 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
350 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
354 pci_write_config_dword(iommu->dev, 0xf0, address);
355 pci_read_config_dword(iommu->dev, 0xf4, &val);
359 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
361 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
362 pci_write_config_dword(iommu->dev, 0xf4, val);
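/*
 * Summary (illustrative): the L1/L2 register spaces above are reached
 * indirectly through a PCI config address/data register pair. For L1 the
 * address (with the L1 bank number shifted into bits 16+ and a
 * write-enable in bit 31) goes to config offset 0xf8 and the data lives
 * at 0xfc; for L2 the pair is 0xf0/0xf4 with bit 8 as the write-enable.
 * E.g. reading L2 register 0x47:
 *
 *	u32 val = iommu_read_l2(iommu, 0x47);
 */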
365 /****************************************************************************
367 * AMD IOMMU MMIO register space handling functions
369 * These functions are used to program the IOMMU device registers in
370 * the MMIO space used by this driver.
372 ****************************************************************************/
375 * This function sets the exclusion range in the IOMMU. DMA accesses to the
376 * exclusion range are passed through untranslated.
378 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
380 u64 start = iommu->exclusion_start & PAGE_MASK;
381 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
384 if (!iommu->exclusion_start)
387 entry = start | MMIO_EXCL_ENABLE_MASK;
388 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
389 &entry, sizeof(entry));
392 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
393 &entry, sizeof(entry));
396 static void iommu_set_cwwb_range(struct amd_iommu *iommu)
398 u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
399 u64 entry = start & PM_ADDR_MASK;
401 if (!check_feature_on_all_iommus(FEATURE_SNP))
405 * Re-purpose Exclusion base/limit registers for Completion wait
406 * write-back base/limit.
408 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
409 &entry, sizeof(entry));
412 * Default to a 4-Kbyte range, which is specified by setting the base
413 * address equal to the limit address.
415 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
416 &entry, sizeof(entry));
419 /* Programs the physical address of the device table into the IOMMU hardware */
420 static void iommu_set_device_table(struct amd_iommu *iommu)
423 u32 dev_table_size = iommu->pci_seg->dev_table_size;
424 void *dev_table = (void *)get_dev_table(iommu);
426 BUG_ON(iommu->mmio_base == NULL);
428 entry = iommu_virt_to_phys(dev_table);
429 entry |= (dev_table_size >> 12) - 1;
430 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
431 &entry, sizeof(entry));
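/*
 * Worked example (illustrative): the low bits of the device table base
 * register encode the table size in 4K pages minus one. For a 2 MiB
 * device table, (dev_table_size >> 12) - 1 = 0x1ff, so the register is
 * programmed with (phys_addr_of_table | 0x1ff).
 */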
434 /* Generic functions to enable/disable certain features of the IOMMU. */
435 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
439 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
440 ctrl |= (1ULL << bit);
441 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
444 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
448 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
449 ctrl &= ~(1ULL << bit);
450 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
453 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
457 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
458 ctrl &= ~CTRL_INV_TO_MASK;
459 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
460 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
463 /* Function to enable the hardware */
464 static void iommu_enable(struct amd_iommu *iommu)
466 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
469 static void iommu_disable(struct amd_iommu *iommu)
471 if (!iommu->mmio_base)
474 /* Disable command buffer */
475 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
477 /* Disable event logging and event interrupts */
478 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
479 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
481 /* Disable IOMMU GA_LOG */
482 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
483 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
485 /* Disable IOMMU hardware itself */
486 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
490 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
491 * the system has one.
493 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
495 if (!request_mem_region(address, end, "amd_iommu")) {
496 pr_err("Cannot reserve memory region %llx-%llx for mmio\n",
498 pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
502 return (u8 __iomem *)ioremap(address, end);
505 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
507 if (iommu->mmio_base)
508 iounmap(iommu->mmio_base);
509 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
512 static inline u32 get_ivhd_header_size(struct ivhd_header *h)
528 /****************************************************************************
530 * The functions below belong to the first pass of AMD IOMMU ACPI table
531 * parsing. In this pass we try to find out the highest device id this
532 * code has to handle. Based on this information the size of the shared data
533 * structures is determined later.
535 ****************************************************************************/
538 * This function calculates the length of a given IVHD entry
540 static inline int ivhd_entry_length(u8 *ivhd)
542 u32 type = ((struct ivhd_entry *)ivhd)->type;
545 return 0x04 << (*ivhd >> 6);
546 } else if (type == IVHD_DEV_ACPI_HID) {
547 /* For ACPI_HID, offset 21 is uid len */
548 return *((u8 *)ivhd + 21) + 22;
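/*
 * Worked example (illustrative): for 4/8/16/32-byte entries the length is
 * encoded in the top two bits of the type byte. IVHD_DEV_ALIAS (0x42) has
 * top bits 01b, so 0x04 << (0x42 >> 6) = 0x04 << 1 = 8 bytes, while
 * IVHD_DEV_SELECT (0x02) has top bits 00b and is 4 bytes long.
 */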
554 * After reading the highest device id from the IOMMU PCI capability header
555 * this function checks whether a higher device id is defined in the ACPI table.
557 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
559 u8 *p = (void *)h, *end = (void *)h;
560 struct ivhd_entry *dev;
561 int last_devid = -EINVAL;
563 u32 ivhd_size = get_ivhd_header_size(h);
566 pr_err("Unsupported IVHD type %#x\n", h->type);
574 dev = (struct ivhd_entry *)p;
577 /* Use maximum BDF value for DEV_ALL */
579 case IVHD_DEV_SELECT:
580 case IVHD_DEV_RANGE_END:
582 case IVHD_DEV_EXT_SELECT:
583 /* all the above subfield types refer to device ids */
584 if (dev->devid > last_devid)
585 last_devid = dev->devid;
590 p += ivhd_entry_length(p);
598 static int __init check_ivrs_checksum(struct acpi_table_header *table)
601 u8 checksum = 0, *p = (u8 *)table;
603 for (i = 0; i < table->length; ++i)
606 /* ACPI table corrupt */
607 pr_err(FW_BUG "IVRS invalid checksum\n");
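/*
 * Illustrative note: the table is valid when the 8-bit sum of all its
 * bytes, including the checksum byte itself, wraps around to 0. E.g. if
 * the non-checksum bytes sum to a value with low byte 0xfe, the checksum
 * byte must be 0x02.
 */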
615 * Iterate over all IVHD entries in the ACPI table and find the highest device
616 * id which we need to handle. This is the first of three functions which parse
617 * the ACPI table. So we check the checksum here.
619 static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
621 u8 *p = (u8 *)table, *end = (u8 *)table;
622 struct ivhd_header *h;
623 int last_devid, last_bdf = 0;
625 p += IVRS_HEADER_LENGTH;
627 end += table->length;
629 h = (struct ivhd_header *)p;
630 if (h->pci_seg == pci_seg &&
631 h->type == amd_iommu_target_ivhd_type) {
632 last_devid = find_last_devid_from_ivhd(h);
636 if (last_devid > last_bdf)
637 last_bdf = last_devid;
646 /****************************************************************************
648 * The following functions belong to the code path which parses the ACPI table
649 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
650 * data structures, initialize the per PCI segment device/alias/rlookup table
651 * and also basically initialize the hardware.
653 ****************************************************************************/
655 /* Allocate per PCI segment device table */
656 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
658 pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
659 get_order(pci_seg->dev_table_size));
660 if (!pci_seg->dev_table)
666 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
668 free_pages((unsigned long)pci_seg->dev_table,
669 get_order(pci_seg->dev_table_size));
670 pci_seg->dev_table = NULL;
673 /* Allocate per PCI segment IOMMU rlookup table. */
674 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
676 pci_seg->rlookup_table = (void *)__get_free_pages(
677 GFP_KERNEL | __GFP_ZERO,
678 get_order(pci_seg->rlookup_table_size));
679 if (pci_seg->rlookup_table == NULL)
685 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
687 free_pages((unsigned long)pci_seg->rlookup_table,
688 get_order(pci_seg->rlookup_table_size));
689 pci_seg->rlookup_table = NULL;
692 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
694 pci_seg->irq_lookup_table = (void *)__get_free_pages(
695 GFP_KERNEL | __GFP_ZERO,
696 get_order(pci_seg->rlookup_table_size));
697 kmemleak_alloc(pci_seg->irq_lookup_table,
698 pci_seg->rlookup_table_size, 1, GFP_KERNEL);
699 if (pci_seg->irq_lookup_table == NULL)
705 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
707 kmemleak_free(pci_seg->irq_lookup_table);
708 free_pages((unsigned long)pci_seg->irq_lookup_table,
709 get_order(pci_seg->rlookup_table_size));
710 pci_seg->irq_lookup_table = NULL;
713 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
717 pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
718 get_order(pci_seg->alias_table_size));
719 if (!pci_seg->alias_table)
723 * let all alias entries point to themselves
725 for (i = 0; i <= pci_seg->last_bdf; ++i)
726 pci_seg->alias_table[i] = i;
731 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
733 free_pages((unsigned long)pci_seg->alias_table,
734 get_order(pci_seg->alias_table_size));
735 pci_seg->alias_table = NULL;
739 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
740 * write commands to that buffer later and the IOMMU will execute them
743 static int __init alloc_command_buffer(struct amd_iommu *iommu)
745 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
746 get_order(CMD_BUFFER_SIZE));
748 return iommu->cmd_buf ? 0 : -ENOMEM;
752 * This function restarts event logging in case the IOMMU experienced
753 * an event log buffer overflow.
755 void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
757 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
758 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
762 * This function restarts event logging in case the IOMMU experienced
763 * a GA log overflow.
765 void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
769 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
770 if (status & MMIO_STATUS_GALOG_RUN_MASK)
773 pr_info_ratelimited("IOMMU GA Log restarting\n");
775 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
776 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
778 writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
779 iommu->mmio_base + MMIO_STATUS_OFFSET);
781 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
782 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
786 * This function resets the command buffer if the IOMMU stopped fetching
789 static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
791 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
793 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
794 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
795 iommu->cmd_buf_head = 0;
796 iommu->cmd_buf_tail = 0;
798 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
802 * This function writes the command buffer address to the hardware and
805 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
809 BUG_ON(iommu->cmd_buf == NULL);
811 entry = iommu_virt_to_phys(iommu->cmd_buf);
812 entry |= MMIO_CMD_SIZE_512;
814 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
815 &entry, sizeof(entry));
817 amd_iommu_reset_cmd_buffer(iommu);
821 * This function disables the command buffer
823 static void iommu_disable_command_buffer(struct amd_iommu *iommu)
825 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
828 static void __init free_command_buffer(struct amd_iommu *iommu)
830 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
833 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
834 gfp_t gfp, size_t size)
836 int order = get_order(size);
837 void *buf = (void *)__get_free_pages(gfp, order);
840 check_feature_on_all_iommus(FEATURE_SNP) &&
841 set_memory_4k((unsigned long)buf, (1 << order))) {
842 free_pages((unsigned long)buf, order);
849 /* allocates the memory where the IOMMU will log its events to */
850 static int __init alloc_event_buffer(struct amd_iommu *iommu)
852 iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
855 return iommu->evt_buf ? 0 : -ENOMEM;
858 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
862 BUG_ON(iommu->evt_buf == NULL);
864 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
866 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
867 &entry, sizeof(entry));
869 /* set head and tail to zero manually */
870 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
871 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
873 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
877 * This function disables the event log buffer
879 static void iommu_disable_event_buffer(struct amd_iommu *iommu)
881 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
884 static void __init free_event_buffer(struct amd_iommu *iommu)
886 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
889 /* allocates the memory where the IOMMU will log PPR requests to */
890 static int __init alloc_ppr_log(struct amd_iommu *iommu)
892 iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
895 return iommu->ppr_log ? 0 : -ENOMEM;
898 static void iommu_enable_ppr_log(struct amd_iommu *iommu)
902 if (iommu->ppr_log == NULL)
905 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
907 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
908 &entry, sizeof(entry));
910 /* set head and tail to zero manually */
911 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
912 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
914 iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
915 iommu_feature_enable(iommu, CONTROL_PPR_EN);
918 static void __init free_ppr_log(struct amd_iommu *iommu)
920 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
923 static void free_ga_log(struct amd_iommu *iommu)
925 #ifdef CONFIG_IRQ_REMAP
926 free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
927 free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
931 #ifdef CONFIG_IRQ_REMAP
932 static int iommu_ga_log_enable(struct amd_iommu *iommu)
940 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
941 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
942 &entry, sizeof(entry));
943 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
944 (BIT_ULL(52)-1)) & ~7ULL;
945 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
946 &entry, sizeof(entry));
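/*
 * Worked example (illustrative): (BIT_ULL(52) - 1) & ~7ULL keeps address
 * bits 3..51, i.e. the tail pointer is truncated to the 52-bit physical
 * address width and forced to 8-byte alignment; a physical address of
 * 0x123456789abf would be programmed as 0x123456789ab8.
 */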
947 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
948 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
951 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
952 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
954 for (i = 0; i < LOOP_TIMEOUT; ++i) {
955 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
956 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
961 if (WARN_ON(i >= LOOP_TIMEOUT))
967 static int iommu_init_ga_log(struct amd_iommu *iommu)
969 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
972 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
973 get_order(GA_LOG_SIZE));
977 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
979 if (!iommu->ga_log_tail)
987 #endif /* CONFIG_IRQ_REMAP */
989 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
991 iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
993 return iommu->cmd_sem ? 0 : -ENOMEM;
996 static void __init free_cwwb_sem(struct amd_iommu *iommu)
999 free_page((unsigned long)iommu->cmd_sem);
1002 static void iommu_enable_xt(struct amd_iommu *iommu)
1004 #ifdef CONFIG_IRQ_REMAP
1006 * XT mode (32-bit APIC destination ID) requires
1007 * GA mode (128-bit IRTE support) as a prerequisite.
1009 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
1010 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
1011 iommu_feature_enable(iommu, CONTROL_XT_EN);
1012 #endif /* CONFIG_IRQ_REMAP */
1015 static void iommu_enable_gt(struct amd_iommu *iommu)
1017 if (!iommu_feature(iommu, FEATURE_GT))
1020 iommu_feature_enable(iommu, CONTROL_GT_EN);
1023 /* sets a specific bit in the device table entry. */
1024 static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
1027 int i = (bit >> 6) & 0x03;
1028 int _bit = bit & 0x3f;
1030 dev_table[devid].data[i] |= (1UL << _bit);
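/*
 * Worked example (illustrative): DTE bits are numbered linearly across the
 * four 64-bit data words. For bit 0x96: i = (0x96 >> 6) & 0x03 = 2 and
 * _bit = 0x96 & 0x3f = 0x16, so bit 22 of data[2] gets set.
 */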
1033 static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
1035 struct dev_table_entry *dev_table = get_dev_table(iommu);
1037 return __set_dev_entry_bit(dev_table, devid, bit);
1040 static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
1043 int i = (bit >> 6) & 0x03;
1044 int _bit = bit & 0x3f;
1046 return (dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
1049 static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
1051 struct dev_table_entry *dev_table = get_dev_table(iommu);
1053 return __get_dev_entry_bit(dev_table, devid, bit);
1056 static bool __copy_device_table(struct amd_iommu *iommu)
1058 u64 int_ctl, int_tab_len, entry = 0;
1059 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1060 struct dev_table_entry *old_devtb = NULL;
1061 u32 lo, hi, devid, old_devtb_size;
1062 phys_addr_t old_devtb_phys;
1063 u16 dom_id, dte_v, irq_v;
1067 /* Each IOMMU uses a separate device table of the same size */
1068 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
1069 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
1070 entry = (((u64) hi) << 32) + lo;
1072 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
1073 if (old_devtb_size != pci_seg->dev_table_size) {
1074 pr_err("The device table size of IOMMU:%d is not as expected!\n",
1080 * When SME is enabled in the first kernel, the entry includes the
1081 * memory encryption mask (sme_me_mask); we must remove the memory
1082 * encryption mask to obtain the true physical address in kdump kernel.
1084 old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
1086 if (old_devtb_phys >= 0x100000000ULL) {
1087 pr_err("The address of old device table is above 4G, not trustworthy!\n");
1090 old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
1091 ? (__force void *)ioremap_encrypted(old_devtb_phys,
1092 pci_seg->dev_table_size)
1093 : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
1098 gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
1099 pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
1100 get_order(pci_seg->dev_table_size));
1101 if (pci_seg->old_dev_tbl_cpy == NULL) {
1102 pr_err("Failed to allocate memory for copying old device table!\n");
1103 memunmap(old_devtb);
1107 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
1108 pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
1109 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
1110 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
1112 if (dte_v && dom_id) {
1113 pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
1114 pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
1115 __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
1116 /* If a gcr3 table exists, mask it out */
1117 if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
1118 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1119 tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1120 pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
1121 tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
1123 pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
1127 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
1128 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
1129 int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
1130 if (irq_v && (int_ctl || int_tab_len)) {
1131 if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
1132 (int_tab_len != DTE_INTTABLEN)) {
1133 pr_err("Wrong old irq remapping flag for devid: %#x\n", devid);
1134 memunmap(old_devtb);
1138 pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
1141 memunmap(old_devtb);
1146 static bool copy_device_table(void)
1148 struct amd_iommu *iommu;
1149 struct amd_iommu_pci_seg *pci_seg;
1151 if (!amd_iommu_pre_enabled)
1154 pr_warn("Translation is already enabled - trying to copy translation structures\n");
1157 * All IOMMUs within a PCI segment share a common device table.
1158 * Hence copy device table only once per PCI segment.
1160 for_each_pci_segment(pci_seg) {
1161 for_each_iommu(iommu) {
1162 if (pci_seg->id != iommu->pci_seg->id)
1164 if (!__copy_device_table(iommu))
1173 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
1177 sysmgt = get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1) |
1178 (get_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2) << 1);
1181 set_dev_entry_bit(iommu, devid, DEV_ENTRY_IW);
1185 * This function takes the device specific flags read from the ACPI
1186 * table and sets up the device table entry with that information
1188 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1189 u16 devid, u32 flags, u32 ext_flags)
1191 if (flags & ACPI_DEVFLAG_INITPASS)
1192 set_dev_entry_bit(iommu, devid, DEV_ENTRY_INIT_PASS);
1193 if (flags & ACPI_DEVFLAG_EXTINT)
1194 set_dev_entry_bit(iommu, devid, DEV_ENTRY_EINT_PASS);
1195 if (flags & ACPI_DEVFLAG_NMI)
1196 set_dev_entry_bit(iommu, devid, DEV_ENTRY_NMI_PASS);
1197 if (flags & ACPI_DEVFLAG_SYSMGT1)
1198 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT1);
1199 if (flags & ACPI_DEVFLAG_SYSMGT2)
1200 set_dev_entry_bit(iommu, devid, DEV_ENTRY_SYSMGT2);
1201 if (flags & ACPI_DEVFLAG_LINT0)
1202 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT0_PASS);
1203 if (flags & ACPI_DEVFLAG_LINT1)
1204 set_dev_entry_bit(iommu, devid, DEV_ENTRY_LINT1_PASS);
1206 amd_iommu_apply_erratum_63(iommu, devid);
1208 amd_iommu_set_rlookup_table(iommu, devid);
1211 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
1213 struct devid_map *entry;
1214 struct list_head *list;
1216 if (type == IVHD_SPECIAL_IOAPIC)
1218 else if (type == IVHD_SPECIAL_HPET)
1223 list_for_each_entry(entry, list, list) {
1224 if (!(entry->id == id && entry->cmd_line))
1227 pr_info("Command-line override present for %s id %d - ignoring\n",
1228 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1230 *devid = entry->devid;
1235 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1240 entry->devid = *devid;
1241 entry->cmd_line = cmd_line;
1243 list_add_tail(&entry->list, list);
1248 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
1251 struct acpihid_map_entry *entry;
1252 struct list_head *list = &acpihid_map;
1254 list_for_each_entry(entry, list, list) {
1255 if (strcmp(entry->hid, hid) ||
1256 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1260 pr_info("Command-line override for hid:%s uid:%s\n",
1262 *devid = entry->devid;
1266 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1270 memcpy(entry->uid, uid, strlen(uid));
1271 memcpy(entry->hid, hid, strlen(hid));
1272 entry->devid = *devid;
1273 entry->cmd_line = cmd_line;
1274 entry->root_devid = (entry->devid & (~0x7));
1276 pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
1277 entry->cmd_line ? "cmd" : "ivrs",
1278 entry->hid, entry->uid, entry->root_devid);
1280 list_add_tail(&entry->list, list);
1284 static int __init add_early_maps(void)
1288 for (i = 0; i < early_ioapic_map_size; ++i) {
1289 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1290 early_ioapic_map[i].id,
1291 &early_ioapic_map[i].devid,
1292 early_ioapic_map[i].cmd_line);
1297 for (i = 0; i < early_hpet_map_size; ++i) {
1298 ret = add_special_device(IVHD_SPECIAL_HPET,
1299 early_hpet_map[i].id,
1300 &early_hpet_map[i].devid,
1301 early_hpet_map[i].cmd_line);
1306 for (i = 0; i < early_acpihid_map_size; ++i) {
1307 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1308 early_acpihid_map[i].uid,
1309 &early_acpihid_map[i].devid,
1310 early_acpihid_map[i].cmd_line);
1319 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
1320 * initializes the hardware and our data structures with it.
1322 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1323 struct ivhd_header *h)
1326 u8 *end = p, flags = 0;
1327 u16 devid = 0, devid_start = 0, devid_to = 0, seg_id;
1328 u32 dev_i, ext_flags = 0;
1330 struct ivhd_entry *e;
1331 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
1336 ret = add_early_maps();
1340 amd_iommu_apply_ivrs_quirks();
1343 * First save the recommended feature enable bits from ACPI
1345 iommu->acpi_flags = h->flags;
1348 * Done. Now parse the device entries
1350 ivhd_size = get_ivhd_header_size(h);
1352 pr_err("Unsupported IVHD type %#x\n", h->type);
1362 e = (struct ivhd_entry *)p;
1363 seg_id = pci_seg->id;
1368 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
1370 for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i)
1371 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1373 case IVHD_DEV_SELECT:
1375 DUMP_printk(" DEV_SELECT\t\t\t devid: %04x:%02x:%02x.%x "
1377 seg_id, PCI_BUS_NUM(e->devid),
1383 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1385 case IVHD_DEV_SELECT_RANGE_START:
1387 DUMP_printk(" DEV_SELECT_RANGE_START\t "
1388 "devid: %04x:%02x:%02x.%x flags: %02x\n",
1389 seg_id, PCI_BUS_NUM(e->devid),
1394 devid_start = e->devid;
1399 case IVHD_DEV_ALIAS:
1401 DUMP_printk(" DEV_ALIAS\t\t\t devid: %04x:%02x:%02x.%x "
1402 "flags: %02x devid_to: %02x:%02x.%x\n",
1403 seg_id, PCI_BUS_NUM(e->devid),
1407 PCI_BUS_NUM(e->ext >> 8),
1408 PCI_SLOT(e->ext >> 8),
1409 PCI_FUNC(e->ext >> 8));
1412 devid_to = e->ext >> 8;
1413 set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
1414 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1415 pci_seg->alias_table[devid] = devid_to;
1417 case IVHD_DEV_ALIAS_RANGE:
1419 DUMP_printk(" DEV_ALIAS_RANGE\t\t "
1420 "devid: %04x:%02x:%02x.%x flags: %02x "
1421 "devid_to: %04x:%02x:%02x.%x\n",
1422 seg_id, PCI_BUS_NUM(e->devid),
1426 seg_id, PCI_BUS_NUM(e->ext >> 8),
1427 PCI_SLOT(e->ext >> 8),
1428 PCI_FUNC(e->ext >> 8));
1430 devid_start = e->devid;
1432 devid_to = e->ext >> 8;
1436 case IVHD_DEV_EXT_SELECT:
1438 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %04x:%02x:%02x.%x "
1439 "flags: %02x ext: %08x\n",
1440 seg_id, PCI_BUS_NUM(e->devid),
1446 set_dev_entry_from_acpi(iommu, devid, e->flags,
1449 case IVHD_DEV_EXT_SELECT_RANGE:
1451 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
1452 "%04x:%02x:%02x.%x flags: %02x ext: %08x\n",
1453 seg_id, PCI_BUS_NUM(e->devid),
1458 devid_start = e->devid;
1463 case IVHD_DEV_RANGE_END:
1465 DUMP_printk(" DEV_RANGE_END\t\t devid: %04x:%02x:%02x.%x\n",
1466 seg_id, PCI_BUS_NUM(e->devid),
1468 PCI_FUNC(e->devid));
1471 for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1473 pci_seg->alias_table[dev_i] = devid_to;
1474 set_dev_entry_from_acpi(iommu,
1475 devid_to, flags, ext_flags);
1477 set_dev_entry_from_acpi(iommu, dev_i,
1481 case IVHD_DEV_SPECIAL: {
1487 handle = e->ext & 0xff;
1488 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8));
1489 type = (e->ext >> 24) & 0xff;
1491 if (type == IVHD_SPECIAL_IOAPIC)
1493 else if (type == IVHD_SPECIAL_HPET)
1498 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x\n",
1500 seg_id, PCI_BUS_NUM(devid),
1504 ret = add_special_device(type, handle, &devid, false);
1509 * add_special_device might update the devid in case a
1510 * command-line override is present. So call
1511 * set_dev_entry_from_acpi after add_special_device.
1513 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1517 case IVHD_DEV_ACPI_HID: {
1519 u8 hid[ACPIHID_HID_LEN];
1520 u8 uid[ACPIHID_UID_LEN];
1523 if (h->type != 0x40) {
1524 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1529 BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
1530 memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
1531 hid[ACPIHID_HID_LEN - 1] = '\0';
1534 pr_err(FW_BUG "Invalid HID.\n");
1540 case UID_NOT_PRESENT:
1543 pr_warn(FW_BUG "Invalid UID length.\n");
1546 case UID_IS_INTEGER:
1548 sprintf(uid, "%d", e->uid);
1551 case UID_IS_CHARACTER:
1553 memcpy(uid, &e->uid, e->uidl);
1554 uid[e->uidl] = '\0';
1561 devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid);
1562 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x\n",
1570 ret = add_acpi_hid_device(hid, uid, &devid, false);
1575 * add_acpi_hid_device might update the devid in case a
1576 * command-line override is present. So call
1577 * set_dev_entry_from_acpi after add_acpi_hid_device.
1579 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1587 p += ivhd_entry_length(p);
1593 /* Allocate PCI segment data structure */
1594 static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
1595 struct acpi_table_header *ivrs_base)
1597 struct amd_iommu_pci_seg *pci_seg;
1601 * First parse ACPI tables to find the largest Bus/Dev/Func we need to
1602 * handle in this PCI segment. Based on this information the shared data
1603 * structures for the PCI segments in the system will be allocated.
1605 last_bdf = find_last_devid_acpi(ivrs_base, id);
1609 pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL);
1610 if (pci_seg == NULL)
1613 pci_seg->last_bdf = last_bdf;
1614 DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
1615 pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
1616 pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
1617 pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
1620 init_llist_head(&pci_seg->dev_data_list);
1621 INIT_LIST_HEAD(&pci_seg->unity_map);
1622 list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
1624 if (alloc_dev_table(pci_seg))
1626 if (alloc_alias_table(pci_seg))
1628 if (alloc_rlookup_table(pci_seg))
1634 static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
1635 struct acpi_table_header *ivrs_base)
1637 struct amd_iommu_pci_seg *pci_seg;
1639 for_each_pci_segment(pci_seg) {
1640 if (pci_seg->id == id)
1644 return alloc_pci_segment(id, ivrs_base);
1647 static void __init free_pci_segments(void)
1649 struct amd_iommu_pci_seg *pci_seg, *next;
1651 for_each_pci_segment_safe(pci_seg, next) {
1652 list_del(&pci_seg->list);
1653 free_irq_lookup_table(pci_seg);
1654 free_rlookup_table(pci_seg);
1655 free_alias_table(pci_seg);
1656 free_dev_table(pci_seg);
1661 static void __init free_iommu_one(struct amd_iommu *iommu)
1663 free_cwwb_sem(iommu);
1664 free_command_buffer(iommu);
1665 free_event_buffer(iommu);
1666 free_ppr_log(iommu);
1668 iommu_unmap_mmio_space(iommu);
1671 static void __init free_iommu_all(void)
1673 struct amd_iommu *iommu, *next;
1675 for_each_iommu_safe(iommu, next) {
1676 list_del(&iommu->list);
1677 free_iommu_one(iommu);
1683 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
1685 * BIOS should disable L2B miscellaneous clock gating by setting
1686 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
1688 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1692 if ((boot_cpu_data.x86 != 0x15) ||
1693 (boot_cpu_data.x86_model < 0x10) ||
1694 (boot_cpu_data.x86_model > 0x1f))
1697 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1698 pci_read_config_dword(iommu->dev, 0xf4, &value);
1703 /* Select NB indirect register 0x90 and enable writing */
1704 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1706 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1707 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1709 /* Clear the enable writing bit */
1710 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1714 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1716 * BIOS should enable ATS write permission check by setting
1717 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1719 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1723 if ((boot_cpu_data.x86 != 0x15) ||
1724 (boot_cpu_data.x86_model < 0x30) ||
1725 (boot_cpu_data.x86_model > 0x3f))
1728 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1729 value = iommu_read_l2(iommu, 0x47);
1734 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1735 iommu_write_l2(iommu, 0x47, value | BIT(0));
1737 pci_info(iommu->dev, "Applying ATS write check workaround\n");
1741 * This function glues the initialization functions for one IOMMU
1742 * together and also allocates the command buffer and programs the
1743 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1745 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
1746 struct acpi_table_header *ivrs_base)
1748 struct amd_iommu_pci_seg *pci_seg;
1750 pci_seg = get_pci_segment(h->pci_seg, ivrs_base);
1751 if (pci_seg == NULL)
1753 iommu->pci_seg = pci_seg;
1755 raw_spin_lock_init(&iommu->lock);
1756 iommu->cmd_sem_val = 0;
1758 /* Add IOMMU to internal data structures */
1759 list_add_tail(&iommu->list, &amd_iommu_list);
1760 iommu->index = amd_iommus_present++;
1762 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1763 WARN(1, "System has more IOMMUs than supported by this driver\n");
1767 /* Index is fine - add IOMMU to the array */
1768 amd_iommus[iommu->index] = iommu;
1771 * Copy data from ACPI table entry to the iommu struct
1773 iommu->devid = h->devid;
1774 iommu->cap_ptr = h->cap_ptr;
1775 iommu->mmio_phys = h->mmio_phys;
1779 /* Check if IVHD EFR contains proper max banks/counters */
1780 if ((h->efr_attr != 0) &&
1781 ((h->efr_attr & (0xF << 13)) != 0) &&
1782 ((h->efr_attr & (0x3F << 17)) != 0))
1783 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1785 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1788 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
1789 * GAM also requires GA mode. Therefore, we need to
1790 * check cmpxchg16b support before enabling it.
1792 if (!boot_cpu_has(X86_FEATURE_CX16) ||
1793 ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1794 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1798 if (h->efr_reg & (1 << 9))
1799 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1801 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1804 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
1805 * XT and GAM also require GA mode. Therefore, we need to
1806 * check cmpxchg16b support before enabling them.
1808 if (!boot_cpu_has(X86_FEATURE_CX16) ||
1809 ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
1810 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1814 if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
1815 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
1817 early_iommu_features_init(iommu, h);
1824 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1825 iommu->mmio_phys_end);
1826 if (!iommu->mmio_base)
1829 return init_iommu_from_acpi(iommu, h);
1832 static int __init init_iommu_one_late(struct amd_iommu *iommu)
1836 if (alloc_cwwb_sem(iommu))
1839 if (alloc_command_buffer(iommu))
1842 if (alloc_event_buffer(iommu))
1845 iommu->int_enabled = false;
1847 init_translation_status(iommu);
1848 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1849 iommu_disable(iommu);
1850 clear_translation_pre_enabled(iommu);
1851 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1854 if (amd_iommu_pre_enabled)
1855 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1857 if (amd_iommu_irq_remap) {
1858 ret = amd_iommu_create_irq_domain(iommu);
1864 * Make sure IOMMU is not considered to translate itself. The IVRS
1865 * table tells us so, but this is a lie!
1867 iommu->pci_seg->rlookup_table[iommu->devid] = NULL;
1873 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1874 * @ivrs: Pointer to the IVRS header
1876 * This function searches through all IVHDs for the maximum supported IVHD type.
1878 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1880 u8 *base = (u8 *)ivrs;
1881 struct ivhd_header *ivhd = (struct ivhd_header *)
1882 (base + IVRS_HEADER_LENGTH);
1883 u8 last_type = ivhd->type;
1884 u16 devid = ivhd->devid;
1886 while (((u8 *)ivhd - base < ivrs->length) &&
1887 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1888 u8 *p = (u8 *) ivhd;
1890 if (ivhd->devid == devid)
1891 last_type = ivhd->type;
1892 ivhd = (struct ivhd_header *)(p + ivhd->length);
1899 * Iterates over all IOMMU entries in the ACPI table, allocates the
1900 * IOMMU structure and initializes it with init_iommu_one()
1902 static int __init init_iommu_all(struct acpi_table_header *table)
1904 u8 *p = (u8 *)table, *end = (u8 *)table;
1905 struct ivhd_header *h;
1906 struct amd_iommu *iommu;
1909 end += table->length;
1910 p += IVRS_HEADER_LENGTH;
1912 /* Phase 1: Process all IVHD blocks */
1914 h = (struct ivhd_header *)p;
1915 if (*p == amd_iommu_target_ivhd_type) {
1917 DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x "
1918 "flags: %01x info %04x\n",
1919 h->pci_seg, PCI_BUS_NUM(h->devid),
1920 PCI_SLOT(h->devid), PCI_FUNC(h->devid),
1921 h->cap_ptr, h->flags, h->info);
1922 DUMP_printk(" mmio-addr: %016llx\n",
1925 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1929 ret = init_iommu_one(iommu, h, table);
1938 /* Phase 2 : Early feature support check */
1941 /* Phase 3 : Enabling IOMMU features */
1942 for_each_iommu(iommu) {
1943 ret = init_iommu_one_late(iommu);
1951 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1954 struct pci_dev *pdev = iommu->dev;
1956 if (!iommu_feature(iommu, FEATURE_PC))
1959 amd_iommu_pc_present = true;
1961 pci_info(pdev, "IOMMU performance counters supported\n");
1963 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1964 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1965 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1970 static ssize_t amd_iommu_show_cap(struct device *dev,
1971 struct device_attribute *attr,
1974 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1975 return sysfs_emit(buf, "%x\n", iommu->cap);
1977 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1979 static ssize_t amd_iommu_show_features(struct device *dev,
1980 struct device_attribute *attr,
1983 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1984 return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
1986 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1988 static struct attribute *amd_iommu_attrs[] = {
1990 &dev_attr_features.attr,
1994 static struct attribute_group amd_iommu_group = {
1995 .name = "amd-iommu",
1996 .attrs = amd_iommu_attrs,
1999 static const struct attribute_group *amd_iommu_groups[] = {
2005 * Note: IVHD types 0x11 and 0x40 also contain an exact copy
2006 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
2007 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
2009 static void __init late_iommu_features_init(struct amd_iommu *iommu)
2011 u64 features, features2;
2013 if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
2016 /* read extended feature bits */
2017 features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
2018 features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
2020 if (!iommu->features) {
2021 iommu->features = features;
2022 iommu->features2 = features2;
2027 * Sanity check and warn if EFR values from
2028 * IVHD and MMIO conflict.
2030 if (features != iommu->features ||
2031 features2 != iommu->features2) {
2033 "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
2034 features, iommu->features,
2035 features2, iommu->features2);
2039 static int __init iommu_init_pci(struct amd_iommu *iommu)
2041 int cap_ptr = iommu->cap_ptr;
2044 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2045 PCI_BUS_NUM(iommu->devid),
2046 iommu->devid & 0xff);
2050 /* Prevent binding other PCI device drivers to IOMMU devices */
2051 iommu->dev->match_driver = false;
2053 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
2056 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
2057 amd_iommu_iotlb_sup = false;
2059 late_iommu_features_init(iommu);
2061 if (iommu_feature(iommu, FEATURE_GT)) {
2066 pasmax = iommu->features & FEATURE_PASID_MASK;
2067 pasmax >>= FEATURE_PASID_SHIFT;
2068 max_pasid = (1 << (pasmax + 1)) - 1;
2070 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
2072 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
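/*
 * Worked example (illustrative): the PASmax field encodes the PASID
 * width minus one. A field value of 15 means 16-bit PASIDs, so
 * max_pasid = (1 << 16) - 1 = 0xffff.
 */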
2074 glxval = iommu->features & FEATURE_GLXVAL_MASK;
2075 glxval >>= FEATURE_GLXVAL_SHIFT;
2077 if (amd_iommu_max_glx_val == -1)
2078 amd_iommu_max_glx_val = glxval;
2080 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
2083 if (iommu_feature(iommu, FEATURE_GT) &&
2084 iommu_feature(iommu, FEATURE_PPR)) {
2085 iommu->is_iommu_v2 = true;
2086 amd_iommu_v2_present = true;
2089 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
2092 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
2093 pr_info("Using strict mode due to virtualization\n");
2094 iommu_set_dma_strict();
2095 amd_iommu_np_cache = true;
2098 init_iommu_perf_ctr(iommu);
2100 if (amd_iommu_pgtable == AMD_IOMMU_V2) {
2101 if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
2102 !iommu_feature(iommu, FEATURE_GT)) {
2103 pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
2104 amd_iommu_pgtable = AMD_IOMMU_V1;
2105 } else if (iommu_default_passthrough()) {
2106 pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n");
2107 amd_iommu_pgtable = AMD_IOMMU_V1;
2111 if (is_rd890_iommu(iommu->dev)) {
2115 pci_get_domain_bus_and_slot(iommu->pci_seg->id,
2116 iommu->dev->bus->number,
2120 * Some rd890 systems may not be fully reconfigured by the
2121 * BIOS, so it's necessary for us to store this information so
2122 * it can be reprogrammed on resume
2124 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
2125 &iommu->stored_addr_lo);
2126 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
2127 &iommu->stored_addr_hi);
2129 /* Low bit locks writes to configuration space */
2130 iommu->stored_addr_lo &= ~1;
2132 for (i = 0; i < 6; i++)
2133 for (j = 0; j < 0x12; j++)
2134 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
2136 for (i = 0; i < 0x83; i++)
2137 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
2140 amd_iommu_erratum_746_workaround(iommu);
2141 amd_iommu_ats_write_check_workaround(iommu);
2143 ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
2144 amd_iommu_groups, "ivhd%d", iommu->index);
2148 iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
2150 return pci_enable_device(iommu->dev);
2153 static void print_iommu_info(void)
2155 static const char * const feat_str[] = {
2156 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
2157 "IA", "GA", "HE", "PC"
2159 struct amd_iommu *iommu;
2161 for_each_iommu(iommu) {
2162 struct pci_dev *pdev = iommu->dev;
2165 pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
2167 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
2168 pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
2170 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
2171 if (iommu_feature(iommu, (1ULL << i)))
2172 pr_cont(" %s", feat_str[i]);
2175 if (iommu->features & FEATURE_GAM_VAPIC)
2176 pr_cont(" GA_vAPIC");
2178 if (iommu->features & FEATURE_SNP)
2184 if (irq_remapping_enabled) {
2185 pr_info("Interrupt remapping enabled\n");
2186 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
2187 pr_info("X2APIC enabled\n");
2189 if (amd_iommu_pgtable == AMD_IOMMU_V2) {
2190 pr_info("V2 page table enabled (Paging mode : %d level)\n",
2191 amd_iommu_gpt_level);
2195 static int __init amd_iommu_init_pci(void)
2197 struct amd_iommu *iommu;
2198 struct amd_iommu_pci_seg *pci_seg;
2201 for_each_iommu(iommu) {
2202 ret = iommu_init_pci(iommu);
2204 pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
2208 /* Need to set up the CWWB range after PCI init */
2209 iommu_set_cwwb_range(iommu);
2213 * Order is important here to make sure any unity map requirements are
2214 * fulfilled. The unity mappings are created and written to the device
2215 * table during the iommu_init_pci() call.
2217 * After that we call init_device_table_dma() to make sure any
2218 * uninitialized DTE will block DMA, and in the end we flush the caches
2219 * of all IOMMUs to make sure the changes to the device table are
2222 for_each_pci_segment(pci_seg)
2223 init_device_table_dma(pci_seg);
2225 for_each_iommu(iommu)
2226 iommu_flush_all_caches(iommu);
2234 /****************************************************************************
2236 * The following functions initialize the MSI interrupts for all IOMMUs
2237 * in the system. It's a bit challenging because there could be multiple
2238 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
2241 ****************************************************************************/
2243 static int iommu_setup_msi(struct amd_iommu *iommu)
2247 r = pci_enable_msi(iommu->dev);
2251 r = request_threaded_irq(iommu->dev->irq,
2252 amd_iommu_int_handler,
2253 amd_iommu_int_thread,
2258 pci_disable_msi(iommu->dev);
2269 dest_mode_logical : 1,
2276 } __attribute__ ((packed));
2279 static struct irq_chip intcapxt_controller;
2281 static int intcapxt_irqdomain_activate(struct irq_domain *domain,
2282 struct irq_data *irqd, bool reserve)
2287 static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
2288 struct irq_data *irqd)
2293 static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
2294 unsigned int nr_irqs, void *arg)
2296 struct irq_alloc_info *info = arg;
2299 if (!info || info->type != X86_IRQ_ALLOC_TYPE_AMDVI)
2302 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
2306 for (i = virq; i < virq + nr_irqs; i++) {
2307 struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
2309 irqd->chip = &intcapxt_controller;
2310 irqd->chip_data = info->data;
2311 __irq_set_handler(i, handle_edge_irq, 0, "edge");
2317 static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
2318 unsigned int nr_irqs)
2320 irq_domain_free_irqs_top(domain, virq, nr_irqs);
2324 static void intcapxt_unmask_irq(struct irq_data *irqd)
2326 struct amd_iommu *iommu = irqd->chip_data;
2327 struct irq_cfg *cfg = irqd_cfg(irqd);
2331 xt.dest_mode_logical = apic->dest_mode_logical;
2332 xt.vector = cfg->vector;
2333 xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
2334 xt.destid_24_31 = cfg->dest_apicid >> 24;
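/*
 * Worked example (illustrative): a 32-bit x2APIC destination id of
 * 0x12345678 is split as destid_0_23 = 0x345678 and
 * destid_24_31 = 0x12 in the INTCAPXT register layout.
 */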
2337 * Current IOMMU implementation uses the same IRQ for all
2338 * 3 IOMMU interrupts.
2340 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
2341 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
2342 writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
2345 static void intcapxt_mask_irq(struct irq_data *irqd)
2347 struct amd_iommu *iommu = irqd->chip_data;
2349 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
2350 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
2351 writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
2355 static int intcapxt_set_affinity(struct irq_data *irqd,
2356 const struct cpumask *mask, bool force)
2358 struct irq_data *parent = irqd->parent_data;
2361 ret = parent->chip->irq_set_affinity(parent, mask, force);
2362 if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
2367 static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
2369 return on ? -EOPNOTSUPP : 0;
2372 static struct irq_chip intcapxt_controller = {
2373 .name = "IOMMU-MSI",
2374 .irq_unmask = intcapxt_unmask_irq,
2375 .irq_mask = intcapxt_mask_irq,
2376 .irq_ack = irq_chip_ack_parent,
2377 .irq_retrigger = irq_chip_retrigger_hierarchy,
2378 .irq_set_affinity = intcapxt_set_affinity,
2379 .irq_set_wake = intcapxt_set_wake,
2380 .flags = IRQCHIP_MASK_ON_SUSPEND,
2383 static const struct irq_domain_ops intcapxt_domain_ops = {
2384 .alloc = intcapxt_irqdomain_alloc,
2385 .free = intcapxt_irqdomain_free,
2386 .activate = intcapxt_irqdomain_activate,
2387 .deactivate = intcapxt_irqdomain_deactivate,
2391 static struct irq_domain *iommu_irqdomain;
2393 static struct irq_domain *iommu_get_irqdomain(void)
2395 struct fwnode_handle *fn;
2397 /* No need for locking here (yet) as the init is single-threaded */
2398 if (iommu_irqdomain)
2399 return iommu_irqdomain;
2401 fn = irq_domain_alloc_named_fwnode("AMD-Vi-MSI");
2405 iommu_irqdomain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0,
2406 fn, &intcapxt_domain_ops,
2408 if (!iommu_irqdomain)
2409 irq_domain_free_fwnode(fn);
2411 return iommu_irqdomain;
static int iommu_setup_intcapxt(struct amd_iommu *iommu)
{
	struct irq_domain *domain;
	struct irq_alloc_info info;
	int irq, ret;
	int node = dev_to_node(&iommu->dev->dev);

	domain = iommu_get_irqdomain();
	if (!domain)
		return -ENXIO;

	init_irq_alloc_info(&info, NULL);
	info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
	info.data = iommu;

	irq = irq_domain_alloc_irqs(domain, 1, node, &info);
	if (irq < 0) {
		irq_domain_remove(domain);
		return irq;
	}

	ret = request_threaded_irq(irq, amd_iommu_int_handler,
				   amd_iommu_int_thread, 0, "AMD-Vi", iommu);
	if (ret) {
		irq_domain_free_irqs(irq, 1);
		irq_domain_remove(domain);
		return ret;
	}

	return 0;
}
static int iommu_init_irq(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		ret = iommu_setup_intcapxt(iommu);
	else if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

	iommu->int_enabled = true;
enable_faults:

	if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
		iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);

	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPRINT_EN);

	return 0;
}
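/*
 * To summarize the interrupt setup above: in x2APIC mode the IOMMU interrupt
 * is delivered through the INTCAPXT registers (iommu_setup_intcapxt());
 * otherwise a classic PCI MSI is used (iommu_setup_msi()). Only after a
 * handler is installed are the event (and, if present, PPR) log interrupts
 * unmasked in the control register.
 */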
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/
static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;
	struct amd_iommu_pci_seg *p, *pci_seg;

	for_each_pci_segment_safe(pci_seg, p) {
		list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
}
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m,
				       struct acpi_table_header *ivrs_base)
{
	struct unity_map_entry *e = NULL;
	struct amd_iommu_pci_seg *pci_seg;
	char *s;

	pci_seg = get_pci_segment(m->pci_seg, ivrs_base);
	if (pci_seg == NULL)
		return -ENOMEM;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = pci_seg->last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	/*
	 * Treat per-device exclusion ranges as r/w unity-mapped regions
	 * since some buggy BIOSes overwrite the exclusion range (the
	 * exclusion_start and exclusion_length members) when multiple
	 * exclusion ranges (IVMD entries) are defined in the ACPI table.
	 */
	if (m->flags & IVMD_FLAG_EXCL_RANGE)
		e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1;

	DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: %04x:%02x:%02x.%x range_start: %016llx range_end: %016llx flags: %x\n",
		    s, m->pci_seg,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), m->pci_seg,
		    PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &pci_seg->unity_map);

	return 0;
}
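/*
 * Worked example (hypothetical values, for illustration only): an
 * ACPI_IVMD_TYPE entry with devid 0x00a0, range_start 0xfed00000 and
 * range_length 0x1000 produces a unity_map_entry with
 * devid_start == devid_end == 0x00a0, address_start == 0xfed00000,
 * address_end == 0xfed01000, and prot taken from the IR/IW bits of
 * m->flags (shifted right by one).
 */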
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
			init_unity_map_range(m, table);

		p += m->length;
	}

	return 0;
}
/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
	u32 devid;
	struct dev_table_entry *dev_table = pci_seg->dev_table;

	if (dev_table == NULL)
		return;

	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
		__set_dev_entry_bit(dev_table, devid, DEV_ENTRY_VALID);
		if (!amd_iommu_snp_en)
			__set_dev_entry_bit(dev_table, devid, DEV_ENTRY_TRANSLATION);
	}
}
static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{
	u32 devid;
	struct dev_table_entry *dev_table = pci_seg->dev_table;

	if (dev_table == NULL)
		return;

	for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
		dev_table[devid].data[0] = 0ULL;
		dev_table[devid].data[1] = 0ULL;
	}
}
static void init_device_table(void)
{
	struct amd_iommu_pci_seg *pci_seg;
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for_each_pci_segment(pci_seg) {
		for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
			__set_dev_entry_bit(pci_seg->dev_table,
					    devid, DEV_ENTRY_IRQ_TBL_EN);
	}
}
static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}
static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
	switch (amd_iommu_guest_ir) {
	case AMD_IOMMU_GUEST_IR_VAPIC:
	case AMD_IOMMU_GUEST_IR_LEGACY_GA:
		iommu_feature_enable(iommu, CONTROL_GA_EN);
		iommu->irte_ops = &irte_128_ops;
		break;
	default:
		iommu->irte_ops = &irte_32_ops;
		break;
	}
#endif
}
static void early_enable_iommu(struct amd_iommu *iommu)
{
	iommu_disable(iommu);
	iommu_init_flags(iommu);
	iommu_set_device_table(iommu);
	iommu_enable_command_buffer(iommu);
	iommu_enable_event_buffer(iommu);
	iommu_set_exclusion_range(iommu);
	iommu_enable_ga(iommu);
	iommu_enable_xt(iommu);
	iommu_enable(iommu);
	iommu_flush_all_caches(iommu);
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or, if running in a kdump kernel with all IOMMUs pre-enabled, it tries
 * to copy the old contents of the device table entries. If that is not
 * the case, or if the copy fails, it just continues as a normal kernel
 * would.
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;
	struct amd_iommu_pci_seg *pci_seg;

	if (!copy_device_table()) {
		/*
		 * If we get here because copying the device table from the
		 * old kernel (with all IOMMUs enabled) failed, print an
		 * error message and try to free the allocated
		 * old_dev_tbl_cpy.
		 */
		if (amd_iommu_pre_enabled)
			pr_err("Failed to copy DEV table from previous kernel.\n");

		for_each_pci_segment(pci_seg) {
			if (pci_seg->old_dev_tbl_cpy != NULL) {
				free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
					   get_order(pci_seg->dev_table_size));
				pci_seg->old_dev_tbl_cpy = NULL;
			}
		}

		for_each_iommu(iommu) {
			clear_translation_pre_enabled(iommu);
			early_enable_iommu(iommu);
		}
	} else {
		pr_info("Copied DEV table from previous kernel.\n");

		for_each_pci_segment(pci_seg) {
			free_pages((unsigned long)pci_seg->dev_table,
				   get_order(pci_seg->dev_table_size));
			pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
		}

		for_each_iommu(iommu) {
			iommu_disable_command_buffer(iommu);
			iommu_disable_event_buffer(iommu);
			iommu_enable_command_buffer(iommu);
			iommu_enable_event_buffer(iommu);
			iommu_enable_ga(iommu);
			iommu_enable_xt(iommu);
			iommu_set_device_table(iommu);
			iommu_flush_all_caches(iommu);
		}
	}
}
static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}
static void enable_iommus_vapic(void)
{
#ifdef CONFIG_IRQ_REMAP
	u32 status, i;
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		/*
		 * Disable GALog if already running. It could have been enabled
		 * in the previous boot before kdump.
		 */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
			continue;

		iommu_feature_disable(iommu, CONTROL_GALOG_EN);
		iommu_feature_disable(iommu, CONTROL_GAINT_EN);

		/*
		 * Poll until the GALogRun bit reads zero; only then can the
		 * GA Log registers be set up or modified safely.
		 */
		for (i = 0; i < LOOP_TIMEOUT; ++i) {
			status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
			if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
				break;
			udelay(10);
		}

		if (WARN_ON(i >= LOOP_TIMEOUT))
			return;
	}

	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
	    !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
		return;
	}

	if (amd_iommu_snp_en &&
	    !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) {
		pr_warn("Disabling Virtual APIC due to SNP\n");
		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
		return;
	}

	/* Enabling GAM and SNPAVIC support */
	for_each_iommu(iommu) {
		if (iommu_init_ga_log(iommu) ||
		    iommu_ga_log_enable(iommu))
			return;

		iommu_feature_enable(iommu, CONTROL_GAM_EN);
		if (amd_iommu_snp_en)
			iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN);
	}

	amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
	pr_info("Virtual APIC enabled\n");
#endif
}
static void enable_iommus(void)
{
	early_enable_iommus();
	enable_iommus_vapic();
	enable_iommus_v2();
}
static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
		amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}
/*
 * Suspend/Resume support
 * disable suspend until real resume is implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};
static void __init free_iommu_resources(void)
{
	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_iommu_all();
	free_pci_segments();
}
/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
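/*
 * The 16-bit IVRS device id encodes (bus << 8) | devfn, so the SB IOAPIC
 * at 00:14.0 works out to ((0x00 << 8) | PCI_DEVFN(0x14, 0)) == 0x00a0.
 */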
static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret           = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
			       fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret           = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("Disabling interrupt remapping\n");

	return ret;
}
static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));
	amd_iommu_pd_alloc_bitmap = NULL;

	free_unity_maps();
}
static void __init ivinfo_init(void *ivrs)
{
	amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
}
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * four times:
 *
 *	1st pass) Discover the most comprehensive IVHD type to use.
 *
 *	2nd pass) Find the highest PCI device id the driver has to handle.
 *		  Based on this information, the sizes of the data
 *		  structures that need to be allocated are determined.
 *
 *	3rd pass) Initialize the data structures just allocated with the
 *		  information in the ACPI table about available AMD IOMMUs
 *		  in the system. It also maps the PCI devices in the
 *		  system to specific IOMMUs.
 *
 *	4th pass) After the basic data structures are allocated and
 *		  initialized we update them with information about memory
 *		  remapping requirements parsed out of the ACPI table in
 *		  this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	int remap_cache_sz, ret;
	acpi_status status;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate the checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

	ivinfo_init(ivrs_base);

	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * Never allocate domain 0 because it's used as the non-allocated
	 * and error value placeholder.
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	/*
	 * Now that the data structures are allocated and basically
	 * initialized, start the real ACPI table scan.
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* 5 level guest page table */
	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    check_feature_gpt_level() == GUEST_PGTABLE_5_LEVEL)
		amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;

	/* Disable any previously enabled IOMMUs */
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		struct amd_iommu_pci_seg *pci_seg;
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							DTE_INTTAB_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		for_each_pci_segment(pci_seg) {
			if (alloc_irq_lookup_table(pci_seg))
				goto out;
		}
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);

	return ret;
}
static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_irq(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}
static bool __init detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_status status;
	int i;

	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("IVRS table error: %s\n", err);
		return false;
	}

	acpi_put_table(ivrs_base);

	if (amd_iommu_force_enable)
		goto out;

	/* Don't use IOMMU if there is Stoney Ridge graphics */
	for (i = 0; i < 32; i++) {
		u32 pci_id;

		pci_id = read_pci_config(0, i, 0, 0);
		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
			pr_info("Disabling IOMMU on Stoney Ridge\n");
			return false;
		}
	}

out:
	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		if (amd_iommu_disabled) {
			init_state = IOMMU_CMDLINE_DISABLED;
			ret = -EINVAL;
		} else {
			ret = early_amd_iommu_init();
			init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		}
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		register_syscore_ops(&amd_iommu_syscore_ops);
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_vapic();
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
	case IOMMU_CMDLINE_DISABLED:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_iommu_resources();
		} else {
			struct amd_iommu *iommu;
			struct amd_iommu_pci_seg *pci_seg;

			for_each_pci_segment(pci_seg)
				uninit_device_table_dma(pci_seg);

			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}
	return ret;
}
static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = -EINVAL;

	while (init_state != state) {
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR ||
		    init_state == IOMMU_CMDLINE_DISABLED)
			break;
		ret = state_next();
	}

	return ret;
}
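/*
 * A successful initialization walks the states in order:
 *
 *	IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *	IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *	IOMMU_INITIALIZED
 *
 * Any failure parks the machine in one of the terminal states
 * (IOMMU_NOT_FOUND, IOMMU_INIT_ERROR or IOMMU_CMDLINE_DISABLED), which
 * iommu_go_to_state() never leaves.
 */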
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret) {
		amd_iommu_irq_remap = false;
		return ret;
	}

	return amd_iommu_irq_remap ? 0 : -ENODEV;
}
int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;
	return amd_iommu_xt_mode;
}
void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	struct amd_iommu *iommu;
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
#ifdef CONFIG_GART_IOMMU
	if (ret && list_empty(&amd_iommu_list)) {
		/*
		 * We failed to initialize the AMD IOMMU - try fallback
		 * to GART if possible.
		 */
		gart_iommu_init();
	}
#endif

	for_each_iommu(iommu)
		amd_iommu_debugfs_setup(iommu);

	return ret;
}
static bool amd_iommu_sme_check(void)
{
	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
	    (boot_cpu_data.x86 != 0x17))
		return true;

	/* For Fam17h, a specific level of support is required */
	if (boot_cpu_data.microcode >= 0x08001205)
		return true;

	if ((boot_cpu_data.microcode >= 0x08001126) &&
	    (boot_cpu_data.microcode <= 0x080011ff))
		return true;

	pr_notice("IOMMU not currently supported when SME is active\n");

	return false;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just checks whether there is an IVRS ACPI table that can be
 * used to detect AMD IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (!amd_iommu_sme_check())
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}
static int __init parse_amd_iommu_intr(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "legacy", 6) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
			break;
		}
		if (strncmp(str, "vapic", 5) == 0) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
			break;
		}
	}
	return 1;
}
static int __init parse_amd_iommu_options(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (strncmp(str, "fullflush", 9) == 0) {
			pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
			iommu_set_dma_strict();
		} else if (strncmp(str, "force_enable", 12) == 0) {
			amd_iommu_force_enable = true;
		} else if (strncmp(str, "off", 3) == 0) {
			amd_iommu_disabled = true;
		} else if (strncmp(str, "force_isolation", 15) == 0) {
			amd_iommu_force_isolation = true;
		} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
			amd_iommu_pgtable = AMD_IOMMU_V1;
		} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
			amd_iommu_pgtable = AMD_IOMMU_V2;
		} else {
			pr_notice("Unknown option - '%s'\n", str);
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}

	return 1;
}
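/*
 * Example kernel command line usage for the option parser above (options
 * are comma-separated), e.g.:
 *
 *	amd_iommu=force_isolation,pgtbl_v2
 */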
static int __init parse_ivrs_ioapic(char *str)
{
	u32 seg = 0, bus, dev, fn;
	int id, i;
	u32 devid;

	if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
	    sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
		goto found;

	if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
	    sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
		pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
			str, id, seg, bus, dev, fn);
		goto found;
	}

	pr_err("Invalid command line: ivrs_ioapic%s\n", str);
	return 1;

found:
	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}
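/*
 * Example: ivrs_ioapic=32@0000:00:14.0 overrides the IVRS-provided mapping
 * and maps IOAPIC id 32 to segment 0000, bus 00, device 14h, function 0.
 */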
static int __init parse_ivrs_hpet(char *str)
{
	u32 seg = 0, bus, dev, fn;
	int id, i;
	u32 devid;

	if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
	    sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
		goto found;

	if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
	    sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
		pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
			str, id, seg, bus, dev, fn);
		goto found;
	}

	pr_err("Invalid command line: ivrs_hpet%s\n", str);
	return 1;

found:
	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);

	cmdline_maps			= true;
	i				= early_hpet_map_size++;
	early_hpet_map[i].id		= id;
	early_hpet_map[i].devid		= devid;
	early_hpet_map[i].cmd_line	= true;

	return 1;
}
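/* Example: ivrs_hpet=0@0000:00:14.0 likewise maps HPET number 0 to 0000:00:14.0. */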
#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)

static int __init parse_ivrs_acpihid(char *str)
{
	u32 seg = 0, bus, dev, fn;
	char *hid, *uid, *p, *addr;
	char acpiid[ACPIID_LEN] = {0};
	int i;

	addr = strchr(str, '@');
	if (!addr) {
		addr = strchr(str, '=');
		if (!addr)
			goto not_found;

		++addr;

		if (strlen(addr) > ACPIID_LEN)
			goto not_found;

		if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
		    sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
			pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
				str, acpiid, seg, bus, dev, fn);
			goto found;
		}
		goto not_found;
	}

	/* We have the '@', make it the terminator to get just the acpiid */
	*addr++ = 0;

	if (strlen(str) > ACPIID_LEN + 1)
		goto not_found;

	if (sscanf(str, "=%s", acpiid) != 1)
		goto not_found;

	if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
	    sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
		goto found;

not_found:
	pr_err("Invalid command line: ivrs_acpihid%s\n", str);
	return 1;

found:
	p = acpiid;
	hid = strsep(&p, ":");
	uid = p;

	if (!hid || !(*hid) || !uid) {
		pr_err("Invalid command line: hid or uid\n");
		return 1;
	}

	/*
	 * Ignore leading zeroes after ':', so e.g., AMDI0095:00
	 * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
	 */
	while (*uid == '0' && *(uid + 1))
		uid++;

	/* Bounds check mirroring the IOAPIC/HPET parsers above */
	if (early_acpihid_map_size == EARLY_MAP_SIZE) {
		pr_err("Early ACPIHID map overflow - ignoring ivrs_acpihid%s\n",
			str);
		return 1;
	}

	i = early_acpihid_map_size++;
	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
	early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
	early_acpihid_map[i].cmd_line = true;

	return 1;
}
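/*
 * Example: ivrs_acpihid=AMDI0020:0@0000:00:13.1 maps the ACPI device with
 * HID "AMDI0020" and UID "0" to the PCI device 0000:00:13.1.
 */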
__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("amd_iommu_intr=",	parse_amd_iommu_intr);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);
__setup("ivrs_acpihid",		parse_ivrs_acpihid);
bool amd_iommu_v2_supported(void)
{
	/* CPU page table size should match IOMMU guest page table size */
	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    amd_iommu_gpt_level != PAGE_MODE_5_LEVEL)
		return false;

	/*
	 * Since DTE[Mode]=0 is prohibited on SNP-enabled systems
	 * (i.e. EFR[SNPSup]=1), an IOMMUv2 page table cannot be used
	 * without also setting up an IOMMUv1 page table.
	 */
	return amd_iommu_v2_present && !amd_iommu_snp_en;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
struct amd_iommu *get_amd_iommu(unsigned int idx)
{
	unsigned int i = 0;
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		if (i++ == idx)
			return iommu;
	return NULL;
}
/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/

u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_banks;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
	struct amd_iommu *iommu = get_amd_iommu(idx);

	if (iommu)
		return iommu->max_counters;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
			       (iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		u64 val = *value & GENMASK_ULL(47, 0);

		writel((u32)val, iommu->mmio_base + offset);
		writel((val >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
		*value &= GENMASK_ULL(47, 0);
	}

	return 0;
}
int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
	if (!iommu)
		return -EINVAL;

	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
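/*
 * Worked example for the register addressing in iommu_pc_get_set_reg():
 * offset = ((0x40 | bank) << 12) | (cntr << 8) | fxn, so bank 0, counter 0,
 * function 0 selects MMIO offset 0x40000, and bank 1, counter 2, function 8
 * selects 0x41208 (hypothetical values, for illustration only).
 */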
#ifdef CONFIG_AMD_MEM_ENCRYPT
int amd_iommu_snp_enable(void)
{
	/*
	 * SNP support requires that the IOMMU is enabled and not
	 * configured in passthrough mode.
	 */
	if (no_iommu || iommu_default_passthrough()) {
		pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported\n");
		return -EINVAL;
	}

	/*
	 * Prevent enabling SNP after the IOMMU_ENABLED state because this
	 * would affect how the IOMMU driver sets up data structures and
	 * configures the IOMMU hardware.
	 */
	if (init_state > IOMMU_ENABLED) {
		pr_err("SNP: Too late to enable SNP for IOMMU.\n");
		return -EINVAL;
	}

	amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
	if (!amd_iommu_snp_en)
		return -EINVAL;

	pr_info("SNP enabled\n");

	/* Enforce IOMMU v1 pagetable when SNP is enabled. */
	if (amd_iommu_pgtable != AMD_IOMMU_V1) {
		pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP\n");
		amd_iommu_pgtable = AMD_IOMMU_V1;
	}

	return 0;
}
#endif