// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"
static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
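/*
 * These sizes map one-to-one onto the ARM short-descriptor formats used
 * by the OMAP IOMMU page tables: 4K small pages, 64K large pages,
 * 1M sections and 16M supersections.
 */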
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
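/*
 * The MMU_LOCK register packs two TLB indices: entries below 'base' are
 * locked (preserved across flushes), while 'victim' selects the entry
 * that the next MMU_LD_TLB write will overwrite.
 */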
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom:	generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for OMAP3 ISP driver.
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			p[i] = iommu_read_reg(obj, i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for OMAP3 ISP driver.
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			iommu_write_reg(obj, p[i], i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}
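/*
 * TWL is the hardware table-walking logic: with it enabled the MMU
 * resolves TLB misses by walking the page table itself; with it disabled
 * every miss raises a TLB-miss interrupt and software must load entries
 * by hand.
 */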
static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}
static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
static int iommu_enable(struct omap_iommu *obj)
{
	int ret;

	ret = pm_runtime_get_sync(obj->dev);
	if (ret < 0)
		pm_runtime_put_noidle(obj->dev);

	return ret < 0 ? ret : 0;
}

static void iommu_disable(struct omap_iommu *obj)
{
	pm_runtime_put_sync(obj->dev);
}
/*
 *	TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}
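/*
 * The endian/element-size/mixed attribute bits sit 6 bits higher in
 * first-level (section/supersection) descriptors than in second-level
 * (small/large page) ones, hence the conditional shift above.
 */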
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}
void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
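/*
 * This sequence appears to first flush any stale entry matching the new
 * CAM address and then load the shadow CAM/RAM registers into the
 * current victim entry via MMU_LD_TLB.
 */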
/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */
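/*
 * Without PREFETCH_IOTLB the TLB is populated on demand by the hardware
 * table walker, so preloading an entry degenerates to a no-op above.
 */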
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}
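/*
 * MMU_GFLUSH only drops the non-protected TLB entries, so the lock base
 * is reset to 0 first to ensure no locked entry survives the flush.
 */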
/*
 *	H/W pagetable operations
 */
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset, int num_entries)
{
	size_t size = num_entries * sizeof(u32);

	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
}
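/*
 * The page tables live in cacheable kernel memory while the MMU reads
 * them directly from RAM, so every PGD/PTE update must be written back
 * with a DMA sync before the hardware walker can observe it.
 */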
static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on dma address and the physical address to be
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}
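/*
 * A 16M supersection is encoded as 16 identical consecutive first-level
 * entries, hence the 16-fold replication and the 16-slot flush above.
 */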
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}
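/*
 * 64K large pages mirror the supersection trick one level down: the same
 * descriptor is written into 16 consecutive second-level entries.
 */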
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
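/*
 * The ordering in omap_iopgtable_store_entry() matters: any stale TLB
 * entry for 'da' is flushed before the page table is touched, and the
 * new translation is only prefetched into the TLB once the tables are
 * consistent in memory.
 */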
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
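/*
 * A return of 0 from report_iommu_fault() means the client's fault
 * handler resolved the fault (e.g. by dynamically loading a missing
 * mapping); otherwise MMU interrupts stay masked and the offending
 * descriptors are dumped.
 */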
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj:	target omap iommu device
 * @iopgd:	page table
 **/
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}
/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
/**
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev:	iommu device
 *
 * This function performs all that is necessary to disable an
 * IOMMU device, either during final detachment from a client
 * device, or during system/runtime suspend of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line.
 **/
static int omap_iommu_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret;

	omap2_iommu_disable(obj);

	if (pdata && pdata->device_idle)
		pdata->device_idle(pdev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
				 ret);
		}
	}

	return 0;
}
/**
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev:	iommu device
 *
 * This function performs all that is necessary to enable an
 * IOMMU device, either during initial attachment to a client
 * device, or during system/runtime resume of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line.
 **/
static int omap_iommu_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret = 0;

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
				 ret);
		}
	}

	if (pdata && pdata->deassert_reset) {
		ret = pdata->deassert_reset(pdev, pdata->reset_name);
		if (ret) {
			dev_err(dev, "deassert_reset failed: %d\n", ret);
			return ret;
		}
	}

	if (pdata && pdata->device_enable)
		pdata->device_enable(pdev);

	ret = omap2_iommu_enable(obj);

	return ret;
}
static bool omap_iommu_can_register(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return true;

	/*
	 * restrict IOMMU core registration only for processor-port MDMA MMUs
	 * on DRA7 DSPs
	 */
	if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
	    (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
		return true;

	return false;
}
static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/*
	 * self-manage the ordering dependencies between omap_device_enable/idle
	 * and omap_device_assert/deassert_hardreset API
	 */
	if (pdev->dev.pm_domain) {
		dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
		pdev->dev.pm_domain = NULL;
	}

	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	if (omap_iommu_can_register(pdev)) {
		obj->group = iommu_group_alloc();
		if (IS_ERR(obj->group))
			return PTR_ERR(obj->group);

		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
					     obj->name);
		if (err)
			goto out_group;

		iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);

		err = iommu_device_register(&obj->iommu);
		if (err)
			goto out_sysfs;
	}

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
	return err;
}
static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	if (obj->group) {
		iommu_group_put(obj->group);
		obj->group = NULL;

		iommu_device_sysfs_remove(&obj->iommu);
		iommu_device_unregister(&obj->iommu);
	}

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}
static const struct dev_pm_ops omap_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
			   omap_iommu_runtime_resume, NULL)
};

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu"	},
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.pm	= &omap_iommu_pm_ops,
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};
static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	e->pgsz		= pgsz;
	e->endian	= MMU_RAM_ENDIAN_LITTLE;
	e->elsz		= MMU_RAM_ELSZ_8;
	e->mixed	= 0;

	return iopgsz_to_bytes(e->pgsz);
}
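/*
 * Every entry built here defaults to a little-endian, 8-bit element,
 * non-mixed region; only the da/pa pair and the page size vary between
 * mappings.
 */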
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret = -EINVAL;
	int i;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		ret = omap_iopgtable_store_entry(oiommu, &e);
		if (ret) {
			dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
				ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			iommu--;
			oiommu = iommu->iommu_dev;
			iopgtable_clear_entry(oiommu, da);
		}
	}

	return ret;
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	bool error = false;
	size_t bytes = 0;
	int i;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		bytes = iopgtable_clear_entry(oiommu, da);
		if (!bytes)
			error = true;
	}

	/*
	 * simplify return - we are only checking if any of the iommus
	 * reported an error, but not if all of them are unmapping the
	 * same number of entries. This should not occur due to the
	 * mirror programming.
	 */
	return error ? 0 : bytes;
}
static int omap_iommu_count(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int count = 0;

	while (arch_data->iommu_dev) {
		count++;
		arch_data++;
	}

	return count;
}
/* caller should call cleanup if this function fails */
static int omap_iommu_attach_init(struct device *dev,
				  struct omap_iommu_domain *odomain)
{
	struct omap_iommu_device *iommu;
	int i;

	odomain->num_iommus = omap_iommu_count(dev);
	if (!odomain->num_iommus)
		return -EINVAL;

	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
				  GFP_ATOMIC);
	if (!odomain->iommus)
		return -ENOMEM;

	iommu = odomain->iommus;
	for (i = 0; i < odomain->num_iommus; i++, iommu++) {
		iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
		if (!iommu->pgtable)
			return -ENOMEM;

		/*
		 * should never fail, but please keep this around to ensure
		 * we keep the hardware happy
		 */
		if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
					IOPGD_TABLE_SIZE)))
			return -EINVAL;
	}

	return 0;
}
static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
{
	int i;
	struct omap_iommu_device *iommu = odomain->iommus;

	for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
		kfree(iommu->pgtable);

	kfree(odomain->iommus);
	odomain->num_iommus = 0;
	odomain->iommus = NULL;
}
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int ret = 0;
	int i;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single client device can be attached to a domain */
	if (omap_domain->dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	ret = omap_iommu_attach_init(dev, omap_domain);
	if (ret) {
		dev_err(dev, "failed to allocate required iommu data %d\n",
			ret);
		goto init_fail;
	}

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
		/* configure and enable the omap iommu */
		oiommu = arch_data->iommu_dev;
		ret = omap_iommu_attach(oiommu, iommu->pgtable);
		if (ret) {
			dev_err(dev, "can't get omap iommu: %d\n", ret);
			goto attach_fail;
		}

		oiommu->domain = domain;
		iommu->iommu_dev = oiommu;
	}

	omap_domain->dev = dev;

	goto out;

attach_fail:
	while (i--) {
		iommu--;
		arch_data--;
		oiommu = iommu->iommu_dev;
		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}
init_fail:
	omap_iommu_detach_fini(omap_domain);
out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev) {
		dev_err(dev, "domain has no attached device\n");
		return;
	}

	/* only a single device is supported per domain for now */
	if (omap_domain->dev != dev) {
		dev_err(dev, "invalid attached device\n");
		return;
	}

	/*
	 * cleanup in the reverse order of attachment - this addresses
	 * any h/w dependencies between multiple instances, if any
	 */
	iommu += (omap_domain->num_iommus - 1);
	arch_data += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
		oiommu = iommu->iommu_dev;
		iopgtable_clear_entry_all(oiommu);

		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}

	omap_iommu_detach_fini(omap_domain);

	omap_domain->dev = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}
static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		return NULL;

	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;
}
static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu = iommu->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	/*
	 * all the iommus within the domain will have identical programming,
	 * so perform the lookup using just the first iommu
	 */
	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}
static int omap_iommu_add_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data, *tmp;
	struct omap_iommu *oiommu;
	struct iommu_group *group;
	struct device_node *np;
	struct platform_device *pdev;
	int num_iommus, i;
	int ret;

	/*
	 * Allocate the archdata iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return 0;

	/*
	 * retrieve the count of IOMMU nodes using phandle size as element size
	 * since #iommu-cells = 0 for OMAP
	 */
	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
						     sizeof(phandle));
	if (num_iommus < 0)
		return 0;

	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data)
		return -ENOMEM;

	for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
		np = of_parse_phandle(dev->of_node, "iommus", i);
		if (!np) {
			kfree(arch_data);
			return -EINVAL;
		}

		pdev = of_find_device_by_node(np);
		if (WARN_ON(!pdev)) {
			of_node_put(np);
			kfree(arch_data);
			return -EINVAL;
		}

		oiommu = platform_get_drvdata(pdev);
		if (!oiommu) {
			of_node_put(np);
			kfree(arch_data);
			return -EINVAL;
		}

		tmp->iommu_dev = oiommu;

		of_node_put(np);
	}

	/*
	 * use the first IOMMU alone for the sysfs device linking.
	 * TODO: Evaluate if a single iommu_group needs to be
	 * maintained for both IOMMUs
	 */
	oiommu = arch_data->iommu_dev;
	ret = iommu_device_link(&oiommu->iommu, dev);
	if (ret) {
		kfree(arch_data);
		return ret;
	}

	dev->archdata.iommu = arch_data;

	/*
	 * IOMMU group initialization calls into omap_iommu_device_group, which
	 * needs a valid dev->archdata.iommu pointer
	 */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		iommu_device_unlink(&oiommu->iommu, dev);
		dev->archdata.iommu = NULL;
		kfree(arch_data);
		return PTR_ERR(group);
	}
	iommu_group_put(group);

	return 0;
}
static void omap_iommu_remove_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	if (!dev->of_node || !arch_data)
		return;

	iommu_device_unlink(&arch_data->iommu_dev->iommu, dev);
	iommu_group_remove_device(dev);

	dev->archdata.iommu = NULL;
	kfree(arch_data);
}
static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct iommu_group *group = ERR_PTR(-EINVAL);

	if (arch_data->iommu_dev)
		group = iommu_group_ref_get(arch_data->iommu_dev->group);

	return group;
}
static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc	= omap_iommu_domain_alloc,
	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.add_device	= omap_iommu_add_device,
	.remove_device	= omap_iommu_remove_device,
	.device_group	= omap_iommu_device_group,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
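/*
 * A minimal usage sketch (not part of this driver) of how a client would
 * drive these ops through the generic IOMMU API of this kernel era;
 * 'client_dev', 'da' and 'pa' are illustrative only:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom) {
 *		if (!iommu_attach_device(dom, client_dev)) {
 *			iommu_map(dom, da, pa, SZ_1M,
 *				  IOMMU_READ | IOMMU_WRITE);
 *			...
 *			iommu_unmap(dom, da, SZ_1M);
 *			iommu_detach_device(dom, client_dev);
 *		}
 *		iommu_domain_free(dom);
 *	}
 */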
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      NULL);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	omap_iommu_debugfs_init();

	ret = platform_driver_register(&omap_iommu_driver);
	if (ret) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
	if (ret)
		goto fail_bus;

	return 0;

fail_bus:
	platform_driver_unregister(&omap_iommu_driver);
fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */