/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#define pr_fmt(fmt) "DMAR: " fmt
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
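
/*
 * Illustrative sketch (not part of the original file): a reader that
 * follows the locking rules above. From interrupt context the DRHD list
 * is walked under rcu_read_lock(); process context would hold
 * dmar_global_lock instead. dmar_drhd_count_rcu() is a hypothetical
 * helper, shown only to make the rules concrete.
 */
static inline int dmar_drhd_count_rcu(void)
{
	struct dmar_drhd_unit *dmaru;
	int count = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		count++;
	rcu_read_unlock();

	return count;
}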
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
	 * at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:
	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers. However, PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header. NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
					    ((void *)drhd) + drhd->header.length,
					    dmaru->segment,
					    dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}
static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
					  dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/*
	 * Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway.
	 */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}
static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
};
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		goto out;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return ret;
}
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}
#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#else
#define dmar_parse_one_rhsa dmar_res_noop
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/*
		 * We don't print this here because we need to sanity-check
		 * it first. So print it in dmar_parse_one_andd() instead.
		 */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	int ret = 0;
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end && ret == 0; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			ret = -EINVAL;
			break;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			ret = -EINVAL;
		}
	}

	return ret;
}
static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}
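
/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to pair a dmar_res_callback with the table walker above.
 * dmar_count_one_drhd() and dmar_count_drhds() are hypothetical helpers;
 * parse_dmar_table() below is the real in-tree user of this pattern.
 */
static inline int dmar_count_one_drhd(struct acpi_dmar_header *header,
				      void *arg)
{
	(*(int *)arg)++;	/* one more DRHD structure seen */
	return 0;
}

static inline int dmar_count_drhds(struct acpi_table_dmar *dmar)
{
	int count = 0;
	struct dmar_res_callback cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_count_one_drhd,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &count,
		.ignore_unhandled = true,	/* skip RMRR/ATSR/... silently */
	};

	return dmar_walk_dmar_table(dmar, &cb) ? -EINVAL : count;
}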
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int ret = 0;
	int drhd_count = 0;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}
static int __init
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					    &validate_drhd_cb);
	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}
static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	if (intel_iommu_enabled) {
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);
		if (IS_ERR(iommu->iommu_dev)) {
			err = PTR_ERR(iommu->iommu_dev);
			goto err_unmap;
		}
	}

	drhd->iommu = iommu;

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}
static void free_iommu(struct intel_iommu *iommu)
{
	iommu_device_destroy(iommu->iommu_dev);

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
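
/*
 * Illustrative usage sketch (not part of the original file): invalidate a
 * four-page (mask = 2) device-IOTLB range for a hypothetical device. The
 * source-id, ATS queue depth, and address below are made-up example values,
 * and example_flush_dev_iotlb() is a hypothetical helper.
 */
static inline void example_flush_dev_iotlb(struct intel_iommu *iommu)
{
	u16 sid = PCI_DEVID(0, PCI_DEVFN(2, 0));	/* device 00:02.0 */
	u16 qdep = 16;					/* from the ATS capability */
	u64 addr = 0x100000;				/* page-aligned base */

	qi_flush_dev_iotlb(iommu, sid, qdep, addr, 2);
}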
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
					ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
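
/*
 * Illustrative sketch (not part of the original file): decoding a raw
 * fault-reason code the same way dmar_fault_do_one() does below.
 * example_decode_fault() is a hypothetical helper.
 */
static inline void example_decode_fault(u8 raw_reason)
{
	int fault_type;
	const char *reason = dmar_get_fault_reason(raw_reason, &fault_type);

	pr_debug("fault %#x: %s (%s)\n", raw_reason, reason,
		 fault_type == INTR_REMAP ? "interrupt remapping" :
		 fault_type == DMA_REMAP ? "DMA remapping" : "unknown");
}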
static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	bool ratelimited;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* Disable printing, simply clear the fault when ratelimited */
	ratelimited = !__ratelimit(&rs);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && !ratelimited)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);

			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			dmar_fault_do_one(iommu, type, fault_reason,
					  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
static u8 dmar_hp_uuid[] = {
	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
};
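
/*
 * In canonical string form the byte array above is the GUID
 * D8C1A3A6-BE9B-4C9B-91BF-C3CB81FC5DAF (the first three fields are
 * stored little-endian).
 */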
/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
}
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}
static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}
static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}
static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}
static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}
static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}
int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}