// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table allocator.
 *
 * Copyright (C) 2020 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
static void v1_tlb_flush_all(void *cookie)
{
}

static void v1_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
}

static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{
}

static const struct iommu_flush_ops v1_flush_ops = {
	.tlb_flush_all	= v1_tlb_flush_all,
	.tlb_flush_walk = v1_tlb_flush_walk,
	.tlb_add_page	= v1_tlb_add_page,
};
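/*
 * Note: the flush callbacks above are intentionally empty. The AMD driver
 * drives IOTLB invalidation through its own command queue (via the
 * amd_iommu_domain_flush_* helpers used later in this file) rather than
 * through the generic io-pgtable TLB hooks.
 */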
/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
			 unsigned long *count)
{
	unsigned long pte_mask, pg_size, cnt;
	u64 *fpte;

	pg_size = PTE_PAGE_SIZE(*pte);
	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
	pte_mask = ~((cnt << 3) - 1);
	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

	if (page_size)
		*page_size = pg_size;

	if (count)
		*count = cnt;

	return fpte;
}
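/*
 * Worked example (illustrative values): a 32K mapping is stored as eight
 * replicated 4K-level PTEs, so cnt is 8 and cnt << 3 is 64 bytes. Masking
 * the PTE address with ~(64 - 1) rounds it down to that 64-byte group
 * boundary, i.e. to the first of the eight replicated entries.
 */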
/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/
static void free_pt_page(u64 *pt, struct list_head *freelist)
{
	struct page *p = virt_to_page(pt);

	list_add_tail(&p->lru, freelist);
}
static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
{
	u64 *p;
	int i;

	for (i = 0; i < 512; ++i) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		/* Large PTE? */
		if (PM_PTE_LEVEL(pt[i]) == 0 ||
		    PM_PTE_LEVEL(pt[i]) == 7)
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = IOMMU_PTE_PAGE(pt[i]);
		if (lvl > 2)
			free_pt_lvl(p, freelist, lvl - 1);
		else
			free_pt_page(p, freelist);
	}

	free_pt_page(pt, freelist);
}
static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
{
	switch (mode) {
	case PAGE_MODE_NONE:
	case PAGE_MODE_7_LEVEL:
		break;
	case PAGE_MODE_1_LEVEL:
		free_pt_page(root, freelist);
		break;
	case PAGE_MODE_2_LEVEL:
	case PAGE_MODE_3_LEVEL:
	case PAGE_MODE_4_LEVEL:
	case PAGE_MODE_5_LEVEL:
	case PAGE_MODE_6_LEVEL:
		free_pt_lvl(root, freelist, mode);
		break;
	default:
		BUG();
	}
}
void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
				  u64 *root, int mode)
{
	u64 pt_root;

	/* lowest 3 bits encode pgtable mode */
	pt_root = mode & 7;
	pt_root |= (u64)root;

	amd_iommu_domain_set_pt_root(domain, pt_root);
}
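/*
 * Illustration (hypothetical addresses): page-table pages come from
 * get_zeroed_page() and are therefore 4K-aligned, so the low 12 bits of
 * root are zero and the mode can be packed in losslessly. A 3-level table
 * rooted at 0xffff888012345000 yields pt_root == 0xffff888012345003.
 */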
/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   unsigned long address,
				   gfp_t gfp)
{
	unsigned long flags;
	bool ret = true;
	u64 *pte;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	spin_lock_irqsave(&domain->lock, flags);

	if (address <= PM_LEVEL_SIZE(domain->iop.mode))
		goto out;

	ret = false;
	if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
		goto out;

	*pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));

	domain->iop.root  = pte;
	domain->iop.mode += 1;
	amd_iommu_update_and_flush_device_table(domain);
	amd_iommu_domain_flush_complete(domain);

	/*
	 * Device Table needs to be updated and flushed before the new root can
	 * be published.
	 */
	amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);

	pte = NULL;
	ret = true;

out:
	spin_unlock_irqrestore(&domain->lock, flags);
	free_page((unsigned long)pte);

	return ret;
}
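/*
 * For example (per the PM_LEVEL_SIZE() definition): growing from a 3-level
 * table (39-bit IOVA space, 512 GiB) to a 4-level table adds one 9-bit
 * translation level and extends the addressable space to 48 bits (256 TiB).
 */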
static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp,
		      bool *updated)
{
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
		/*
		 * Return an error if there is no memory to update the
		 * page-table.
		 */
		if (!increase_address_space(domain, address, gfp))
			return NULL;
	}

	level   = domain->iop.mode - 1;
	pte     = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;
		int pte_level;

		__pte     = *pte;
		pte_level = PM_PTE_LEVEL(__pte);

		/*
		 * If we replace a series of large PTEs, we need
		 * to tear down all of them.
		 */
		if (IOMMU_PTE_PRESENT(__pte) &&
		    pte_level == PAGE_MODE_7_LEVEL) {
			unsigned long count, i;
			u64 *lpte;

			lpte = first_pte_l7(pte, NULL, &count);

			/*
			 * Unmap the replicated PTEs that still match the
			 * original large mapping
			 */
			for (i = 0; i < count; ++i)
				cmpxchg64(&lpte[i], __pte, 0ULL);

			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte) ||
		    pte_level == PAGE_MODE_NONE) {
			page = (u64 *)get_zeroed_page(gfp);
			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				free_page((unsigned long)page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		/* No level skipping support yet */
		if (pte_level != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(__pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}
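/*
 * Note on concurrency: alloc_pte() installs intermediate tables with
 * try_cmpxchg64() instead of holding the domain lock. If two mappers race
 * on the same slot, the loser frees its freshly allocated page and retries
 * via 'continue', so both end up walking the winner's table.
 */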
/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size)
{
	int level;
	u64 *pte;

	*page_size = 0;

	if (address > PM_LEVEL_SIZE(pgtable->mode))
		return NULL;

	level      =  pgtable->mode - 1;
	pte        = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	*page_size =  PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == 7 ||
		    PM_PTE_LEVEL(*pte) == 0)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte        = IOMMU_PTE_PAGE(*pte);
		pte        = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	/*
	 * If we have a series of large PTEs, make
	 * sure to return a pointer to the first one.
	 */
	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
		pte = first_pte_l7(pte, page_size, NULL);

	return pte;
}
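/*
 * Callers rely on *page_size describing the region the returned PTE
 * actually covers: iommu_v1_unmap_pages() below uses it to clear all
 * replicated entries of a large mapping and to advance its IOVA cursor.
 */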
static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
{
	u64 *pt;
	int mode;

	/* pr_fmt already adds the "AMD-Vi: " prefix */
	while (!try_cmpxchg64(pte, &pteval, 0))
		pr_warn("IOMMU pte changed since we read it\n");

	if (!IOMMU_PTE_PRESENT(pteval))
		return;

	pt   = IOMMU_PTE_PAGE(pteval);
	mode = IOMMU_PTE_MODE(pteval);

	free_sub_pt(pt, mode, freelist);
}
/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space and allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
	LIST_HEAD(freelist);
	bool updated = false;
	u64 __pte, *pte;
	int ret, i, count;

	BUG_ON(!IS_ALIGNED(iova, pgsize));
	BUG_ON(!IS_ALIGNED(paddr, pgsize));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

	while (pgcount > 0) {
		count = PAGE_SIZE_PTE_COUNT(pgsize);
		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);

		ret = -ENOMEM;
		if (!pte)
			goto out;

		for (i = 0; i < count; ++i)
			free_clear_pte(&pte[i], pte[i], &freelist);

		if (!list_empty(&freelist))
			updated = true;

		if (count > 1) {
			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
		} else
			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;

		if (prot & IOMMU_PROT_IR)
			__pte |= IOMMU_PTE_IR;
		if (prot & IOMMU_PROT_IW)
			__pte |= IOMMU_PTE_IW;

		for (i = 0; i < count; ++i)
			pte[i] = __pte;

		iova  += pgsize;
		paddr += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}

	ret = 0;

out:
	if (updated) {
		unsigned long flags;

		spin_lock_irqsave(&dom->lock, flags);
		/*
		 * Flush domain TLB(s) and wait for completion. Any Device-Table
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
		amd_iommu_domain_flush_tlb_pde(dom);
		amd_iommu_domain_flush_complete(dom);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	put_pages_list(&freelist);

	return ret;
}
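/*
 * Illustration of the count > 1 branch above (hypothetical sizes): for a
 * 32K mapping, count == 8 and the same level-7 PTE value is written to
 * eight consecutive slots; fetch_pte()/first_pte_l7() later recover the
 * whole group from a pointer to any one of them.
 */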
static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;
	size_t size = pgcount << __ffs(pgsize);

	BUG_ON(!is_power_of_2(pgsize));

	unmapped = 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		} else {
			return unmapped;
		}

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}
static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte       = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}
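/*
 * Example (hypothetical values): for an IOVA inside a 2M mapping,
 * fetch_pte() reports pte_pgsize == 0x200000, so offset_mask is 0x1fffff
 * and the result combines the 2M-aligned physical base from the PTE with
 * the low 21 bits of the IOVA.
 */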
/*
 * ----------------------------------------------------
 */
static void v1_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
	struct protection_domain *dom;
	LIST_HEAD(freelist);

	if (pgtable->mode == PAGE_MODE_NONE)
		return;

	dom = container_of(pgtable, struct protection_domain, iop);

	/* Page-table is not visible to IOMMU anymore, so free it */
	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
	       pgtable->mode > PAGE_MODE_6_LEVEL);

	free_sub_pt(pgtable->root, pgtable->mode, &freelist);

	/* Update data structure */
	amd_iommu_domain_clr_pt_root(dom);

	/* Make changes visible to IOMMUs */
	amd_iommu_domain_update(dom);

	put_pages_list(&freelist);
}
static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
	cfg->ias           = IOMMU_IN_ADDR_BIT_SIZE;
	cfg->oas           = IOMMU_OUT_ADDR_BIT_SIZE;
	cfg->tlb           = &v1_flush_ops;

	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
	pgtable->iop.ops.unmap_pages  = iommu_v1_unmap_pages;
	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;

	return &pgtable->iop;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
	.alloc	= v1_alloc_pgtable,
	.free	= v1_free_pgtable,
};
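/*
 * Usage sketch (driver side, not part of this file): the AMD driver obtains
 * these ops through the generic io-pgtable layer, along the lines of
 *
 *	pgtbl_ops = alloc_io_pgtable_ops(AMD_IOMMU_V1, &pgtable->pgtbl_cfg,
 *					 protection_domain);
 *
 * after which iommu_map()/iommu_unmap() on the domain end up in
 * iommu_v1_map_pages()/iommu_v1_unmap_pages() above.
 */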