hi = iova_pfn(iovad, window->res->end - window->offset);
reserve_iova(iovad, lo, hi);
}
+
+ /* Get reserved DMA windows from host bridge */
+ resource_list_for_each_entry(window, &bridge->dma_ranges) {
+ end = window->res->start - window->offset;
+ resv_iova:
+ if (end > start) {
+ lo = iova_pfn(iovad, start);
+ hi = iova_pfn(iovad, end);
+ reserve_iova(iovad, lo, hi);
+ } else {
+ /* dma_ranges list should be sorted */
+ dev_err(&dev->dev, "Failed to reserve IOVA\n");
+ return -EINVAL;
+ }
+
+ start = window->res->end - window->offset + 1;
+ /* If window is last entry */
+ if (window->node.next == &bridge->dma_ranges &&
+		    end != ~(phys_addr_t)0) {
+			end = ~(phys_addr_t)0;
+ goto resv_iova;
+ }
+ }
+
+ return 0;
}
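
For illustration only (the window below is hypothetical, not taken from the patch), this is what the loop above ends up reserving for a bridge with a single forwarded window, assuming start is initialised to 0 earlier in the function:

	/*
	 * Hypothetical bridge->dma_ranges entry forwarding bus addresses
	 * [0x40000000, 0x7fffffff] (offset 0):
	 *
	 *   1st pass:   end = 0x40000000 > start(0)  -> reserve_iova() over [0x0, 0x40000000]
	 *               start = 0x80000000
	 *   last entry: end != ~0                    -> end = ~(phys_addr_t)0, goto resv_iova
	 *   2nd pass:   end > start                  -> reserve_iova() over [0x80000000, ~(phys_addr_t)0]
	 *
	 * Everything the host bridge cannot forward is now reserved, so the
	 * IOVA allocator only hands out DMA addresses inside the window.
	 */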
static int iova_reserve_iommu_regions(struct device *dev,
* Maps the pages of the buffer in @pages into @vma. The caller is responsible
* for verifying the correct size and protection of @vma beforehand.
*/
-int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
+static int __iommu_dma_mmap(struct page **pages, size_t size,
+		struct vm_area_struct *vma)
 {
-	unsigned long uaddr = vma->vm_start;
-	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	int ret = -ENXIO;
-
-	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-		ret = vm_insert_page(vma, uaddr, pages[i]);
-		if (ret)
-			break;
-		uaddr += PAGE_SIZE;
-	}
-	return ret;
+	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 }
 
-static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-		size_t size, int prot, struct iommu_domain *domain)
+static void iommu_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	size_t iova_off = 0;
-	dma_addr_t iova;
+	phys_addr_t phys;
 
-	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
-		iova_off = iova_offset(&cookie->iovad, phys);
-		size = iova_align(&cookie->iovad, size + iova_off);
-	}
+	if (dev_is_dma_coherent(dev))
+		return;
 
-	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
-	if (!iova)
-		return DMA_MAPPING_ERROR;
+	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
+	arch_sync_dma_for_cpu(dev, phys, size, dir);
+}
 
-	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
-		iommu_dma_free_iova(cookie, iova, size);
-		return DMA_MAPPING_ERROR;
-	}
-	return iova + iova_off;
+static void iommu_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+{
+	phys_addr_t phys;
+
+	if (dev_is_dma_coherent(dev))
+		return;
+
+	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
+	arch_sync_dma_for_device(dev, phys, size, dir);
+}
+
+static void iommu_dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nelems,
+		enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (dev_is_dma_coherent(dev))
+		return;
+
+	for_each_sg(sgl, sg, nelems, i)
+		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void iommu_dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nelems,
+		enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (dev_is_dma_coherent(dev))
+		return;
+
+	for_each_sg(sgl, sg, nelems, i)
+		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, int prot)
+static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
 {
-	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
-			iommu_get_dma_domain(dev));
+	phys_addr_t phys = page_to_phys(page) + offset;
+	bool coherent = dev_is_dma_coherent(dev);
+	int prot = dma_info_to_prot(dir, coherent, attrs);
+	dma_addr_t dma_handle;
+
+	dma_handle = __iommu_dma_map(dev, phys, size, prot);
+	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    dma_handle != DMA_MAPPING_ERROR)
+		arch_sync_dma_for_device(dev, phys, size, dir);
+	return dma_handle;
}
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
+static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
+	__iommu_dma_unmap(dev, dma_handle, size);
}
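
For context, a driver-side sketch of the streaming-DMA cycle these ops sit behind; the function and variable names are placeholders and only generic <linux/dma-mapping.h> calls are used, which dispatch into the iommu_dma_* implementations above when the device is behind an IOMMU:

#include <linux/dma-mapping.h>

/* Map a page for device writes, bounce ownership between device and CPU,
 * then unmap. On a non-coherent device the sync calls become the
 * arch_sync_dma_for_{cpu,device}() calls seen above. */
static int example_rx(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device DMAs into the buffer ... */

	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* the CPU may now read the data */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... more device DMA ... */

	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}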
/*
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
spin_unlock_irqrestore(&cookie->msi_lock, flags);
- if (WARN_ON(!msi_page)) {
- /*
- * We're called from a void callback, so the best we can do is
- * 'fail' by filling the message with obviously bogus values.
- * Since we got this far due to an IOMMU being present, it's
- * not like the existing address would have worked anyway...
- */
- msg->address_hi = ~0U;
- msg->address_lo = ~0U;
- msg->data = ~0U;
- } else {
- msg->address_hi = upper_32_bits(msi_page->iova);
- msg->address_lo &= cookie_msi_granule(cookie) - 1;
- msg->address_lo += lower_32_bits(msi_page->iova);
- }
+ msi_desc_set_iommu_cookie(desc, msi_page);
+
+ if (!msi_page)
+ return -ENOMEM;
+ return 0;
+ }
+
+ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
+ struct msi_msg *msg)
+ {
+ struct device *dev = msi_desc_to_dev(desc);
+ const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ const struct iommu_dma_msi_page *msi_page;
+
+ msi_page = msi_desc_get_iommu_cookie(desc);
+
+ if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
+ return;
+
+ msg->address_hi = upper_32_bits(msi_page->iova);
+ msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
+ msg->address_lo += lower_32_bits(msi_page->iova);
+ }
+
+static int iommu_dma_init(void)
+{
+	return iova_cache_get();
+}
+arch_initcall(iommu_dma_init);
}
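
A sketch of how an MSI irqchip driver is expected to use the split API above: iommu_dma_prepare_msi() from sleeping context while the interrupt is being set up, and iommu_dma_compose_msi_msg() from the atomic message-composition path. The example_* names and the doorbell address are hypothetical:

#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/msi.h>

#define EXAMPLE_DOORBELL_PA	0x10000000UL	/* placeholder doorbell address */

/* Sleeping context, e.g. while allocating the interrupt. */
static int example_msi_prepare(struct msi_desc *desc)
{
	return iommu_dma_prepare_msi(desc, EXAMPLE_DOORBELL_PA);
}

/* Atomic context: fill in the message, then let the IOMMU layer swap the
 * physical doorbell address for the IOVA it mapped, if there is one. */
static void example_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	msg->address_hi = upper_32_bits(EXAMPLE_DOORBELL_PA);
	msg->address_lo = lower_32_bits(EXAMPLE_DOORBELL_PA);
	msg->data = data->hwirq;

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}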
static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			  struct scatterlist *sg, unsigned long phys_pfn,
 			  unsigned long nr_pages, int prot)
 {
-	int ret;
+	int iommu_id, ret;
 	struct intel_iommu *iommu;
 
 	/* Do the real mapping first */
 	ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
 	if (ret)
 		return ret;
 
-	/* Notify about the new mapping */
-	if (domain_type_is_vm(domain)) {
-		/* VM typed domains can have more than one IOMMUs */
-		int iommu_id;
-
-		for_each_domain_iommu(iommu_id, domain) {
-			iommu = g_iommus[iommu_id];
-			__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-		}
-	} else {
-		/* General domains only have one IOMMU */
-		iommu = domain_get_iommu(domain);
-		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-	}
+	for_each_domain_iommu(iommu_id, domain) {
+		iommu = g_iommus[iommu_id];
+		__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+	}
 
 	return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
*/
if (!pci_is_pcie(pdev)) {
if (!pci_is_root_bus(pdev->bus))
-				return 0;
+				return IOMMU_DOMAIN_DMA;
 			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
-				return 0;
+				return IOMMU_DOMAIN_DMA;
 		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
-			return 0;
+			return IOMMU_DOMAIN_DMA;
} else {
if (device_has_rmrr(dev))
-			return 0;
+			return IOMMU_DOMAIN_DMA;
 	}
 
-	/*
-	 * At boot time, we don't yet know if devices will be 64-bit capable.
-	 * Assume that they will — if they turn out not to be, then we can
-	 * take them out of the 1:1 domain later.
-	 */
-	if (!startup) {
-		/*
-		 * If the device's dma_mask is less than the system's memory
-		 * size then this is not a candidate for identity mapping.
-		 */
-		u64 dma_mask = *dev->dma_mask;
-
-		if (dev->coherent_dma_mask &&
-		    dev->coherent_dma_mask < dma_mask)
-			dma_mask = dev->coherent_dma_mask;
-
-		return dma_mask >= dma_get_required_mask(dev);
-	}
-
-	return 1;
-}
-
-static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
-{
-	int ret;
-
-	if (!iommu_should_identity_map(dev, 1))
-		return 0;
-
-	ret = domain_add_dev_info(si_domain, dev);
-	if (!ret)
-		dev_info(dev, "%s identity mapping\n",
-			 hw ? "Hardware" : "Software");
-	else if (ret == -ENODEV)
-		/* device not associated with an iommu */
-		ret = 0;
-
-	return ret;
-}
-
-static int __init iommu_prepare_static_identity_mapping(int hw)
-{
-	struct pci_dev *pdev = NULL;
-	struct dmar_drhd_unit *drhd;
-	/* To avoid a -Wunused-but-set-variable warning. */
-	struct intel_iommu *iommu __maybe_unused;
-	struct device *dev;
-	int i;
-	int ret = 0;
-
-	for_each_pci_dev(pdev) {
-		ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
-		if (ret)
-			return ret;
-	}
-
-	for_each_active_iommu(iommu, drhd)
-		for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
-			struct acpi_device_physical_node *pn;
-			struct acpi_device *adev;
-
-			if (dev->bus != &acpi_bus_type)
-				continue;
-
-			adev = to_acpi_device(dev);
-			mutex_lock(&adev->physical_node_lock);
-			list_for_each_entry(pn, &adev->physical_node_list, node) {
-				ret = dev_prepare_static_identity_mapping(pn->dev, hw);
-				if (ret)
-					break;
-			}
-			mutex_unlock(&adev->physical_node_lock);
-			if (ret)
-				return ret;
-		}
-
-	return 0;
-}
+	return (iommu_identity_mapping & IDENTMAP_ALL) ?
+			IOMMU_DOMAIN_IDENTITY : 0;
+}
 
 static void intel_iommu_init_qi(struct intel_iommu *iommu)
 {
 	/*
 	 * Start from the sane iommu hardware state.
 	 * If the queued invalidation is already initialized by us
 	 * (for example, while enabling interrupt-remapping) then
 	 * we got the things already rolling from a sane state.
 	 */
 	if (!iommu->qi) {
 		/*
 		 * Clear any previous faults.
 		 */
 		dmar_fault(-1, iommu);
 		/*
 		 * Disable queued invalidation if supported and already enabled
 		 * before OS handover.
 		 */
 		dmar_disable_qi(iommu);
}
if (dmar_enable_qi(iommu)) {
iommu_identity_mapping |= IDENTMAP_ALL;
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- iommu_identity_mapping |= IDENTMAP_GFX;
+ dmar_map_gfx = 0;
#endif
 
+	if (!dmar_map_gfx)
+		iommu_identity_mapping |= IDENTMAP_GFX;
+
 	check_tylersburg_isoch();
 
-	if (iommu_identity_mapping) {
-		ret = si_domain_init(hw_pass_through);
-		if (ret)
-			goto free_iommu;
-	}
-
-	/*
-	 * If we copied translations from a previous kernel in the kdump
-	 * case, we can not assign the devices to domains now, as that
-	 * would eliminate the old mappings. So skip this part and defer
-	 * the assignment to device driver initialization time.
-	 */
-	if (copied_tables)
-		goto domains_done;
-
-	/*
-	 * If pass through is not set or not enabled, setup context entries for
-	 * identity mappings for rmrr, gfx, and isa and may fall back to static
-	 * identity mapping if iommu_identity_mapping is set.
-	 */
-	if (iommu_identity_mapping) {
-		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
-		if (ret) {
-			pr_crit("Failed to setup IOMMU pass-through\n");
-			goto free_iommu;
-		}
-	}
-	/*
-	 * For each rmrr
-	 *   for each dev attached to rmrr
-	 *   do
-	 *     locate drhd for dev, alloc domain for dev
-	 *     allocate free domain
-	 *     allocate page table entries for rmrr
-	 *     if context not allocated for bus
-	 *           allocate and init context
-	 *           set present in root table for this bus
-	 *     init context with domain, translation etc
-	 *    endfor
-	 * endfor
-	 */
-	pr_info("Setting RMRR:\n");
-	for_each_rmrr_units(rmrr) {
-		/* some BIOS lists non-exist devices in DMAR table. */
-		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
-					  i, dev) {
-			ret = iommu_prepare_rmrr_dev(rmrr, dev);
-			if (ret)
-				pr_err("Mapping reserved region failed\n");
-		}
-	}
-
-	iommu_prepare_isa();
-
-domains_done:
+	ret = si_domain_init(hw_pass_through);
+	if (ret)
+		goto free_iommu;
/*
* for each drhd
}
/* Check if the dev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct device *dev)
+static bool iommu_need_mapping(struct device *dev)
 {
-	int found;
+	int ret;
 
 	if (iommu_dummy(dev))
-		return 1;
+		return false;
 
-	if (!iommu_identity_mapping)
-		return 0;
-
-	found = identity_mapping(dev);
-	if (found) {
-		if (iommu_should_identity_map(dev, 0))
-			return 1;
-		else {
-			/*
-			 * 32 bit DMA is removed from si_domain and fall back
-			 * to non-identity mapping.
-			 */
-			dmar_remove_one_dev_info(dev);
-			dev_info(dev, "32bit DMA uses non-identity mapping\n");
-			return 0;
-		}
-	} else {
+	ret = identity_mapping(dev);
+	if (ret) {
+		u64 dma_mask = *dev->dma_mask;
+
+		if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
+			dma_mask = dev->coherent_dma_mask;
+
+		if (dma_mask >= dma_get_required_mask(dev))
+			return false;
+
 		/*
-		 * In case of a detached 64 bit DMA device from vm, the device
-		 * is put into si_domain for identity mapping.
+		 * 32 bit DMA is removed from si_domain and fall back to
+		 * non-identity mapping.
 		 */
-		if (iommu_should_identity_map(dev, 0)) {
-			int ret;
-			ret = domain_add_dev_info(si_domain, dev);
-			if (!ret) {
-				dev_info(dev, "64bit DMA uses identity mapping\n");
-				return 1;
+		dmar_remove_one_dev_info(dev);
+		ret = iommu_request_dma_domain_for_dev(dev);
+		if (ret) {
+			struct iommu_domain *domain;
+			struct dmar_domain *dmar_domain;
+
+			domain = iommu_get_domain_for_dev(dev);
+			if (domain) {
+				dmar_domain = to_dmar_domain(domain);
+				dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 			}
+			get_private_domain_for_dev(dev);
 		}
+
+		dev_info(dev, "32bit DMA uses non-identity mapping\n");
 	}
 
-	return 0;
+	return true;
}
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return paddr;
-
-	domain = get_valid_domain_for_dev(dev);
+	domain = find_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
+ if (!iommu_need_mapping(dev))
+ return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
-	domain = get_valid_domain_for_dev(dev);
+	domain = find_domain(dev);
if (!domain)
return 0;
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
-	if (type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
-	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
-	if (!dmar_domain) {
-		pr_err("Can't allocate dmar_domain\n");
-		return NULL;
-	}
-	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-		pr_err("Domain initialization failed\n");
-		domain_exit(dmar_domain);
-		return NULL;
-	}
-	domain_update_iommu_cap(dmar_domain);
-
-	domain = &dmar_domain->domain;
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
-	domain->geometry.force_aperture = true;
-
-	return domain;
+	switch (type) {
+	case IOMMU_DOMAIN_DMA:
+	/* fallthrough */
+	case IOMMU_DOMAIN_UNMANAGED:
+		dmar_domain = alloc_domain(0);
+		if (!dmar_domain) {
+			pr_err("Can't allocate dmar_domain\n");
+			return NULL;
+		}
+		if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+			pr_err("Domain initialization failed\n");
+			domain_exit(dmar_domain);
+			return NULL;
+		}
+
+		if (type == IOMMU_DOMAIN_DMA &&
+		    init_iova_flush_queue(&dmar_domain->iovad,
+					  iommu_flush_iova, iova_entry_free)) {
+			pr_warn("iova flush queue initialization failed\n");
+			intel_iommu_strict = 1;
+		}
+
+		domain_update_iommu_cap(dmar_domain);
+
+		domain = &dmar_domain->domain;
+		domain->geometry.aperture_start = 0;
+		domain->geometry.aperture_end =
+				__DOMAIN_MAX_ADDR(dmar_domain->gaw);
+		domain->geometry.force_aperture = true;
+
+		return domain;
+	case IOMMU_DOMAIN_IDENTITY:
+		return &si_domain->domain;
+	default:
+		return NULL;
+	}
+
+	return NULL;
}
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
-	domain_exit(to_dmar_domain(domain));
+	if (domain != &si_domain->domain)
+		domain_exit(to_dmar_domain(domain));
}
- static int intel_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ /*
+ * Check whether a @domain could be attached to the @dev through the
+ * aux-domain attach/detach APIs.
+ */
+ static inline bool
+ is_aux_domain(struct device *dev, struct iommu_domain *domain)
{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
- int addr_width;
- u8 bus, devfn;
+ struct device_domain_info *info = dev->archdata.iommu;
- if (device_is_rmrr_locked(dev)) {
- dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
- return -EPERM;
- }
+ return info && info->auxd_enabled &&
+ domain->type == IOMMU_DOMAIN_UNMANAGED;
+ }
- /* normally dev is not mapped */
- if (unlikely(domain_context_mapped(dev))) {
- struct dmar_domain *old_domain;
+ static void auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
+ {
+ struct device_domain_info *info = dev->archdata.iommu;
- old_domain = find_domain(dev);
- if (old_domain) {
- rcu_read_lock();
- dmar_remove_one_dev_info(dev);
- rcu_read_unlock();
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
+
+ domain->auxd_refcnt++;
+ list_add(&domain->auxd, &info->auxiliary_domains);
+ }
+
+ static void auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
+ {
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
+
+ list_del(&domain->auxd);
+ domain->auxd_refcnt--;
+
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+ }
+
+ static int aux_domain_add_dev(struct dmar_domain *domain,
+ struct device *dev)
+ {
+ int ret;
+ u8 bus, devfn;
+ unsigned long flags;
+ struct intel_iommu *iommu;
- if (!domain_type_is_vm_or_si(old_domain) &&
- list_empty(&old_domain->devices))
- domain_exit(old_domain);
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
+
+ if (domain->default_pasid <= 0) {
+ int pasid;
+
+ pasid = intel_pasid_alloc_id(domain, PASID_MIN,
+ pci_max_pasids(to_pci_dev(dev)),
+ GFP_KERNEL);
+ if (pasid <= 0) {
+ pr_err("Can't allocate default pasid\n");
+ return -ENODEV;
}
+ domain->default_pasid = pasid;
}
+ spin_lock_irqsave(&device_domain_lock, flags);
+ /*
+ * iommu->lock must be held to attach domain to iommu and setup the
+ * pasid entry for second level translation.
+ */
+ spin_lock(&iommu->lock);
+ ret = domain_attach_iommu(domain, iommu);
+ if (ret)
+ goto attach_failed;
+
+ /* Setup the PASID entry for mediated devices: */
+ ret = intel_pasid_setup_second_level(iommu, domain, dev,
+ domain->default_pasid);
+ if (ret)
+ goto table_failed;
+ spin_unlock(&iommu->lock);
+
+ auxiliary_link_device(domain, dev);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+
+ table_failed:
+ domain_detach_iommu(domain, iommu);
+ attach_failed:
+ spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+
+ return ret;
+ }
+
+ static void aux_domain_remove_dev(struct dmar_domain *domain,
+ struct device *dev)
+ {
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+
+ if (!is_aux_domain(dev, &domain->domain))
+ return;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ iommu = info->iommu;
+
+ auxiliary_unlink_device(domain, dev);
+
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ }
+
+ static int prepare_domain_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+ {
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct intel_iommu *iommu;
+ int addr_width;
+ u8 bus, devfn;
+
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
dmar_domain->agaw--;
}
- return domain_add_dev_info(dmar_domain, dev);
+ return 0;
+ }
+
+ static int intel_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+ {
+ int ret;
+
+	if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
+	    device_is_rmrr_locked(dev)) {
+ dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
+ return -EPERM;
+ }
+
+ if (is_aux_domain(dev, domain))
+ return -EPERM;
+
+ /* normally dev is not mapped */
+ if (unlikely(domain_context_mapped(dev))) {
+ struct dmar_domain *old_domain;
+
+ old_domain = find_domain(dev);
+		if (old_domain)
+			dmar_remove_one_dev_info(dev);
+ }
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ return domain_add_dev_info(to_dmar_domain(domain), dev);
+ }
+
+ static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+ {
+ int ret;
+
+ if (!is_aux_domain(dev, domain))
+ return -EPERM;
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ return aux_domain_add_dev(to_dmar_domain(domain), dev);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
{
struct iommu_resv_region *entry, *next;
-	list_for_each_entry_safe(entry, next, head, list) {
-		if (entry->type == IOMMU_RESV_MSI)
-			kfree(entry);
-	}
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
}
- #ifdef CONFIG_INTEL_IOMMU_SVM
- int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
+ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
struct device_domain_info *info;
struct context_entry *context;
u64 ctx_lo;
int ret;
-	domain = get_valid_domain_for_dev(sdev->dev);
+	domain = find_domain(dev);
if (!domain)
return -EINVAL;
return ret;
}
+static void intel_iommu_apply_resv_region(struct device *dev,
+					  struct iommu_domain *domain,
+					  struct iommu_resv_region *region)
+{
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	unsigned long start, end;
+
+	start = IOVA_PFN(region->start);
+	end = IOVA_PFN(region->start + region->length - 1);
+
+	WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
+}
+
+ #ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
struct intel_iommu *iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
+ static int intel_iommu_enable_auxd(struct device *dev)
+ {
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+ u8 bus, devfn;
+ int ret;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu || dmar_disabled)
+ return -EINVAL;
+
+ if (!sm_supported(iommu) || !pasid_supported(iommu))
+ return -EINVAL;
+
+ ret = intel_iommu_enable_pasid(iommu, dev);
+ if (ret)
+ return -ENODEV;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ info->auxd_enabled = 1;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+ }
+
+ static int intel_iommu_disable_auxd(struct device *dev)
+ {
+ struct device_domain_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ if (!WARN_ON(!info))
+ info->auxd_enabled = 0;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+ }
+
+ /*
+ * A PCI express designated vendor specific extended capability is defined
+ * in the section 3.7 of Intel scalable I/O virtualization technical spec
+ * for system software and tools to detect endpoint devices supporting the
+ * Intel scalable IO virtualization without host driver dependency.
+ *
+ * Returns the address of the matching extended capability structure within
+ * the device's PCI configuration space or 0 if the device does not support
+ * it.
+ */
+ static int siov_find_pci_dvsec(struct pci_dev *pdev)
+ {
+ int pos;
+ u16 vendor, id;
+
+ pos = pci_find_next_ext_capability(pdev, 0, 0x23);
+ while (pos) {
+ pci_read_config_word(pdev, pos + 4, &vendor);
+ pci_read_config_word(pdev, pos + 8, &id);
+ if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
+ return pos;
+
+ pos = pci_find_next_ext_capability(pdev, pos, 0x23);
+ }
+
+ return 0;
+ }
+
+ static bool
+ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
+ {
+ if (feat == IOMMU_DEV_FEAT_AUX) {
+ int ret;
+
+ if (!dev_is_pci(dev) || dmar_disabled ||
+ !scalable_mode_support() || !iommu_pasid_support())
+ return false;
+
+ ret = pci_pasid_features(to_pci_dev(dev));
+ if (ret < 0)
+ return false;
+
+ return !!siov_find_pci_dvsec(to_pci_dev(dev));
+ }
+
+ return false;
+ }
+
+ static int
+ intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+ {
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return intel_iommu_enable_auxd(dev);
+
+ return -ENODEV;
+ }
+
+ static int
+ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+ {
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return intel_iommu_disable_auxd(dev);
+
+ return -ENODEV;
+ }
+
+ static bool
+ intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
+ {
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return scalable_mode_support() && info && info->auxd_enabled;
+
+ return false;
+ }
+
+ static int
+ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+ {
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ return dmar_domain->default_pasid > 0 ?
+ dmar_domain->default_pasid : -EINVAL;
+ }
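
For context, a sketch of how a parent device driver (one exposing mediated devices, for instance) would reach these callbacks through the generic IOMMU API; the example_* function and the trimmed error handling are hypothetical, while the iommu_* calls are the core wrappers that land in the ops above:

#include <linux/iommu.h>
#include <linux/pci.h>

static int example_setup_aux_domain(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;
	int pasid, ret;

	/* -> intel_iommu_dev_enable_feat() -> intel_iommu_enable_auxd() */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	/* an UNMANAGED domain that will be attached as an aux domain */
	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	/* -> intel_iommu_aux_attach_device() -> aux_domain_add_dev() */
	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		return ret;

	/* the PASID that tags this domain's second-level mappings */
	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0)
		return pasid;

	/* iommu_map()/iommu_unmap() on "domain" now populate the page table
	 * walked for DMA tagged with that PASID, not the device's default
	 * domain. */
	return 0;
}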
+
+static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
+					   struct device *dev)
+{
+	return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
.remove_device = intel_iommu_remove_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = intel_iommu_put_resv_regions,
+	.apply_resv_region = intel_iommu_apply_resv_region,
.device_group = pci_device_group,
+ .dev_has_feat = intel_iommu_dev_has_feat,
+ .dev_feat_enabled = intel_iommu_dev_feat_enabled,
+ .dev_enable_feat = intel_iommu_dev_enable_feat,
+ .dev_disable_feat = intel_iommu_dev_disable_feat,
+	.is_attach_deferred = intel_iommu_is_attach_deferred,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};