drivers/iommu/ipmmu-vmsa.c
1 /*
2  * IPMMU VMSA
3  *
4  * Copyright (C) 2014 Renesas Electronics Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; version 2 of the License.
9  */
10
11 #include <linux/bitmap.h>
12 #include <linux/delay.h>
13 #include <linux/dma-iommu.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/err.h>
16 #include <linux/export.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iommu.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/of_platform.h>
23 #include <linux/platform_device.h>
24 #include <linux/sizes.h>
25 #include <linux/slab.h>
26
27 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
28 #include <asm/dma-iommu.h>
29 #include <asm/pgalloc.h>
30 #endif
31
32 #include "io-pgtable.h"
33
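/*
 * Number of translation contexts managed by the driver. A single context is
 * used for now; mmu->ctx and mmu->domains[] are sized accordingly.
 */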
34 #define IPMMU_CTX_MAX 1
35
36 struct ipmmu_vmsa_device {
37         struct device *dev;
38         void __iomem *base;
39         struct iommu_device iommu;
40
41         unsigned int num_utlbs;
42         spinlock_t lock;                        /* Protects ctx and domains[] */
43         DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
44         struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
45
46         struct dma_iommu_mapping *mapping;
47 };
48
49 struct ipmmu_vmsa_domain {
50         struct ipmmu_vmsa_device *mmu;
51         struct iommu_domain io_domain;
52
53         struct io_pgtable_cfg cfg;
54         struct io_pgtable_ops *iop;
55
56         unsigned int context_id;
57         spinlock_t lock;                        /* Protects mappings */
58 };
59
60 struct ipmmu_vmsa_iommu_priv {
61         struct ipmmu_vmsa_device *mmu;
62         struct device *dev;
63         struct list_head list;
64 };
65
66 static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
67 {
68         return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
69 }
70
71 static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
72 {
73         return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
74 }
75
76 #define TLB_LOOP_TIMEOUT                100     /* 100us */
77
78 /* -----------------------------------------------------------------------------
79  * Register Definitions
80  */
81
82 #define IM_NS_ALIAS_OFFSET              0x800
83
84 #define IM_CTX_SIZE                     0x40
85
86 #define IMCTR                           0x0000
87 #define IMCTR_TRE                       (1 << 17)
88 #define IMCTR_AFE                       (1 << 16)
89 #define IMCTR_RTSEL_MASK                (3 << 4)
90 #define IMCTR_RTSEL_SHIFT               4
91 #define IMCTR_TREN                      (1 << 3)
92 #define IMCTR_INTEN                     (1 << 2)
93 #define IMCTR_FLUSH                     (1 << 1)
94 #define IMCTR_MMUEN                     (1 << 0)
95
96 #define IMCAAR                          0x0004
97
98 #define IMTTBCR                         0x0008
99 #define IMTTBCR_EAE                     (1 << 31)
100 #define IMTTBCR_PMB                     (1 << 30)
101 #define IMTTBCR_SH1_NON_SHAREABLE       (0 << 28)
102 #define IMTTBCR_SH1_OUTER_SHAREABLE     (2 << 28)
103 #define IMTTBCR_SH1_INNER_SHAREABLE     (3 << 28)
104 #define IMTTBCR_SH1_MASK                (3 << 28)
105 #define IMTTBCR_ORGN1_NC                (0 << 26)
106 #define IMTTBCR_ORGN1_WB_WA             (1 << 26)
107 #define IMTTBCR_ORGN1_WT                (2 << 26)
108 #define IMTTBCR_ORGN1_WB                (3 << 26)
109 #define IMTTBCR_ORGN1_MASK              (3 << 26)
110 #define IMTTBCR_IRGN1_NC                (0 << 24)
111 #define IMTTBCR_IRGN1_WB_WA             (1 << 24)
112 #define IMTTBCR_IRGN1_WT                (2 << 24)
113 #define IMTTBCR_IRGN1_WB                (3 << 24)
114 #define IMTTBCR_IRGN1_MASK              (3 << 24)
115 #define IMTTBCR_TSZ1_MASK               (7 << 16)
116 #define IMTTBCR_TSZ1_SHIFT              16
117 #define IMTTBCR_SH0_NON_SHAREABLE       (0 << 12)
118 #define IMTTBCR_SH0_OUTER_SHAREABLE     (2 << 12)
119 #define IMTTBCR_SH0_INNER_SHAREABLE     (3 << 12)
120 #define IMTTBCR_SH0_MASK                (3 << 12)
121 #define IMTTBCR_ORGN0_NC                (0 << 10)
122 #define IMTTBCR_ORGN0_WB_WA             (1 << 10)
123 #define IMTTBCR_ORGN0_WT                (2 << 10)
124 #define IMTTBCR_ORGN0_WB                (3 << 10)
125 #define IMTTBCR_ORGN0_MASK              (3 << 10)
126 #define IMTTBCR_IRGN0_NC                (0 << 8)
127 #define IMTTBCR_IRGN0_WB_WA             (1 << 8)
128 #define IMTTBCR_IRGN0_WT                (2 << 8)
129 #define IMTTBCR_IRGN0_WB                (3 << 8)
130 #define IMTTBCR_IRGN0_MASK              (3 << 8)
131 #define IMTTBCR_SL0_LVL_2               (0 << 4)
132 #define IMTTBCR_SL0_LVL_1               (1 << 4)
133 #define IMTTBCR_TSZ0_MASK               (7 << 0)
134 #define IMTTBCR_TSZ0_SHIFT              0
135
136 #define IMBUSCR                         0x000c
137 #define IMBUSCR_DVM                     (1 << 2)
138 #define IMBUSCR_BUSSEL_SYS              (0 << 0)
139 #define IMBUSCR_BUSSEL_CCI              (1 << 0)
140 #define IMBUSCR_BUSSEL_IMCAAR           (2 << 0)
141 #define IMBUSCR_BUSSEL_CCI_IMCAAR       (3 << 0)
142 #define IMBUSCR_BUSSEL_MASK             (3 << 0)
143
144 #define IMTTLBR0                        0x0010
145 #define IMTTUBR0                        0x0014
146 #define IMTTLBR1                        0x0018
147 #define IMTTUBR1                        0x001c
148
149 #define IMSTR                           0x0020
150 #define IMSTR_ERRLVL_MASK               (3 << 12)
151 #define IMSTR_ERRLVL_SHIFT              12
152 #define IMSTR_ERRCODE_TLB_FORMAT        (1 << 8)
153 #define IMSTR_ERRCODE_ACCESS_PERM       (4 << 8)
154 #define IMSTR_ERRCODE_SECURE_ACCESS     (5 << 8)
155 #define IMSTR_ERRCODE_MASK              (7 << 8)
156 #define IMSTR_MHIT                      (1 << 4)
157 #define IMSTR_ABORT                     (1 << 2)
158 #define IMSTR_PF                        (1 << 1)
159 #define IMSTR_TF                        (1 << 0)
160
161 #define IMMAIR0                         0x0028
162 #define IMMAIR1                         0x002c
163 #define IMMAIR_ATTR_MASK                0xff
164 #define IMMAIR_ATTR_DEVICE              0x04
165 #define IMMAIR_ATTR_NC                  0x44
166 #define IMMAIR_ATTR_WBRWA               0xff
167 #define IMMAIR_ATTR_SHIFT(n)            ((n) << 3)
168 #define IMMAIR_ATTR_IDX_NC              0
169 #define IMMAIR_ATTR_IDX_WBRWA           1
170 #define IMMAIR_ATTR_IDX_DEV             2
171
172 #define IMEAR                           0x0030
173
174 #define IMPCTR                          0x0200
175 #define IMPSTR                          0x0208
176 #define IMPEAR                          0x020c
177 #define IMPMBA(n)                       (0x0280 + ((n) * 4))
178 #define IMPMBD(n)                       (0x02c0 + ((n) * 4))
179
180 #define IMUCTR(n)                       (0x0300 + ((n) * 16))
181 #define IMUCTR_FIXADDEN                 (1 << 31)
182 #define IMUCTR_FIXADD_MASK              (0xff << 16)
183 #define IMUCTR_FIXADD_SHIFT             16
184 #define IMUCTR_TTSEL_MMU(n)             ((n) << 4)
185 #define IMUCTR_TTSEL_PMB                (8 << 4)
186 #define IMUCTR_TTSEL_MASK               (15 << 4)
187 #define IMUCTR_FLUSH                    (1 << 1)
188 #define IMUCTR_MMUEN                    (1 << 0)
189
190 #define IMUASID(n)                      (0x0308 + ((n) * 16))
191 #define IMUASID_ASID8_MASK              (0xff << 8)
192 #define IMUASID_ASID8_SHIFT             8
193 #define IMUASID_ASID0_MASK              (0xff << 0)
194 #define IMUASID_ASID0_SHIFT             0
195
196 /* -----------------------------------------------------------------------------
197  * Read/Write Access
198  */
199
200 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
201 {
202         return ioread32(mmu->base + offset);
203 }
204
205 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
206                         u32 data)
207 {
208         iowrite32(data, mmu->base + offset);
209 }
210
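/*
 * Context registers are banked per translation context, IM_CTX_SIZE bytes
 * apart; address them relative to the context's bank.
 */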
211 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
212 {
213         return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
214 }
215
216 static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
217                             u32 data)
218 {
219         ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
220 }
221
222 /* -----------------------------------------------------------------------------
223  * TLB and microTLB Management
224  */
225
226 /* Wait for any pending TLB invalidations to complete */
227 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
228 {
229         unsigned int count = 0;
230
231         while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
232                 cpu_relax();
233                 if (++count == TLB_LOOP_TIMEOUT) {
234                         dev_err_ratelimited(domain->mmu->dev,
235                                             "TLB sync timed out -- MMU may be deadlocked\n");
236                         return;
237                 }
238                 udelay(1);
239         }
240 }
241
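/*
 * Setting IMCTR_FLUSH invalidates the whole TLB for the context; the bit is
 * then polled by ipmmu_tlb_sync() until the hardware clears it.
 */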
242 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
243 {
244         u32 reg;
245
246         reg = ipmmu_ctx_read(domain, IMCTR);
247         reg |= IMCTR_FLUSH;
248         ipmmu_ctx_write(domain, IMCTR, reg);
249
250         ipmmu_tlb_sync(domain);
251 }
252
253 /*
254  * Enable MMU translation for the microTLB.
255  */
256 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
257                               unsigned int utlb)
258 {
259         struct ipmmu_vmsa_device *mmu = domain->mmu;
260
261         /*
262          * TODO: Reference-count the microTLB as several bus masters can be
263          * connected to the same microTLB.
264          */
265
266         /* TODO: What should we set the ASID to ? */
267         ipmmu_write(mmu, IMUASID(utlb), 0);
268         /* TODO: Do we need to flush the microTLB ? */
269         ipmmu_write(mmu, IMUCTR(utlb),
270                     IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
271                     IMUCTR_MMUEN);
272 }
273
274 /*
275  * Disable MMU translation for the microTLB.
276  */
277 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
278                                unsigned int utlb)
279 {
280         struct ipmmu_vmsa_device *mmu = domain->mmu;
281
282         ipmmu_write(mmu, IMUCTR(utlb), 0);
283 }
284
285 static void ipmmu_tlb_flush_all(void *cookie)
286 {
287         struct ipmmu_vmsa_domain *domain = cookie;
288
289         ipmmu_tlb_invalidate(domain);
290 }
291
292 static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
293                                 size_t granule, bool leaf, void *cookie)
294 {
295         /* The hardware doesn't support selective TLB flush. */
296 }
297
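/*
 * The hardware only supports full TLB invalidation, so .tlb_add_flush is a
 * no-op and .tlb_sync performs (and waits for) a full flush instead.
 */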
298 static const struct iommu_gather_ops ipmmu_gather_ops = {
299         .tlb_flush_all = ipmmu_tlb_flush_all,
300         .tlb_add_flush = ipmmu_tlb_add_flush,
301         .tlb_sync = ipmmu_tlb_flush_all,
302 };
303
304 /* -----------------------------------------------------------------------------
305  * Domain/Context Management
306  */
307
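/*
 * Find a free context index, mark it used and bind it to the domain, all
 * under mmu->lock. Returns IPMMU_CTX_MAX when no context is available.
 */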
308 static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
309                                          struct ipmmu_vmsa_domain *domain)
310 {
311         unsigned long flags;
312         int ret;
313
314         spin_lock_irqsave(&mmu->lock, flags);
315
316         ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
317         if (ret != IPMMU_CTX_MAX) {
318                 mmu->domains[ret] = domain;
319                 set_bit(ret, mmu->ctx);
320         }
321
322         spin_unlock_irqrestore(&mmu->lock, flags);
323
324         return ret;
325 }
326
327 static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
328                                       unsigned int context_id)
329 {
330         unsigned long flags;
331
332         spin_lock_irqsave(&mmu->lock, flags);
333
334         clear_bit(context_id, mmu->ctx);
335         mmu->domains[context_id] = NULL;
336
337         spin_unlock_irqrestore(&mmu->lock, flags);
338 }
339
340 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
341 {
342         u64 ttbr;
343         int ret;
344
345         /*
346          * Allocate the page table operations.
347          *
348          * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
349          * access, Long-descriptor format" that the NStable bit being set in a
350          * table descriptor will result in the NStable and NS bits of all child
351          * entries being ignored and considered as being set. The IPMMU seems
352          * not to comply with this, as it generates a secure access page fault
353          * if any of the NStable and NS bits isn't set when running in
354          * non-secure mode.
355          */
356         domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
357         domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
358         domain->cfg.ias = 32;
359         domain->cfg.oas = 40;
360         domain->cfg.tlb = &ipmmu_gather_ops;
361         domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
362         domain->io_domain.geometry.force_aperture = true;
363         /*
364          * TODO: Add support for coherent walk through CCI with DVM and remove
365          * cache handling. For now, delegate it to the io-pgtable code.
366          */
367         domain->cfg.iommu_dev = domain->mmu->dev;
368
369         /*
370          * Find an unused context.
371          */
372         ret = ipmmu_domain_allocate_context(domain->mmu, domain);
373         if (ret == IPMMU_CTX_MAX)
374                 return -EBUSY;
375
376         domain->context_id = ret;
377
378         domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
379                                            domain);
380         if (!domain->iop) {
381                 ipmmu_domain_free_context(domain->mmu, domain->context_id);
382                 return -EINVAL;
383         }
384
385         /* TTBR0 */
386         ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
387         ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
388         ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
389
390         /*
391          * TTBCR
392          * We use long descriptors with inner-shareable WBWA tables and allocate
393          * the whole 32-bit VA space to TTBR0.
394          */
395         ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
396                         IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
397                         IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
398
399         /* MAIR0 */
400         ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
401
402         /* IMBUSCR */
403         ipmmu_ctx_write(domain, IMBUSCR,
404                         ipmmu_ctx_read(domain, IMBUSCR) &
405                         ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
406
407         /*
408          * IMSTR
409          * Clear all interrupt flags.
410          */
411         ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
412
413         /*
414          * IMCTR
415          * Enable the MMU and interrupt generation. The long-descriptor
416          * translation table format doesn't use TEX remapping. Don't enable AF
417          * software management as we have no use for it. Flush the TLB as
418          * required when modifying the context registers.
419          */
420         ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
421
422         return 0;
423 }
424
425 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
426 {
427         /*
428          * Disable the context. Flush the TLB as required when modifying the
429          * context registers.
430          *
431          * TODO: Is TLB flush really needed ?
432          */
433         ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
434         ipmmu_tlb_sync(domain);
435         ipmmu_domain_free_context(domain->mmu, domain->context_id);
436 }
437
438 /* -----------------------------------------------------------------------------
439  * Fault Handling
440  */
441
442 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
443 {
444         const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
445         struct ipmmu_vmsa_device *mmu = domain->mmu;
446         u32 status;
447         u32 iova;
448
449         status = ipmmu_ctx_read(domain, IMSTR);
450         if (!(status & err_mask))
451                 return IRQ_NONE;
452
453         iova = ipmmu_ctx_read(domain, IMEAR);
454
455         /*
456          * Clear the error status flags. Unlike traditional interrupt flag
457          * registers that must be cleared by writing 1, this status register
458          * seems to require 0. The error address register must be read before,
459          * otherwise its value will be 0.
460          */
461         ipmmu_ctx_write(domain, IMSTR, 0);
462
463         /* Log fatal errors. */
464         if (status & IMSTR_MHIT)
465                 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
466                                     iova);
467         if (status & IMSTR_ABORT)
468                 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
469                                     iova);
470
471         if (!(status & (IMSTR_PF | IMSTR_TF)))
472                 return IRQ_NONE;
473
474         /*
475          * Try to handle page faults and translation faults.
476          *
477          * TODO: We need to look up the faulty device based on the I/O VA. Use
478          * the IOMMU device for now.
479          */
480         if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
481                 return IRQ_HANDLED;
482
483         dev_err_ratelimited(mmu->dev,
484                             "Unhandled fault: status 0x%08x iova 0x%08x\n",
485                             status, iova);
486
487         return IRQ_HANDLED;
488 }
489
490 static irqreturn_t ipmmu_irq(int irq, void *dev)
491 {
492         struct ipmmu_vmsa_device *mmu = dev;
493         irqreturn_t status = IRQ_NONE;
494         unsigned int i;
495         unsigned long flags;
496
497         spin_lock_irqsave(&mmu->lock, flags);
498
499         /*
500          * Check interrupts for all active contexts.
501          */
502         for (i = 0; i < IPMMU_CTX_MAX; i++) {
503                 if (!mmu->domains[i])
504                         continue;
505                 if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
506                         status = IRQ_HANDLED;
507         }
508
509         spin_unlock_irqrestore(&mmu->lock, flags);
510
511         return status;
512 }
513
514 /* -----------------------------------------------------------------------------
515  * IOMMU Operations
516  */
517
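/* Common allocation helper shared by the ARM and IOMMU_DMA domain_alloc paths. */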
518 static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
519 {
520         struct ipmmu_vmsa_domain *domain;
521
522         domain = kzalloc(sizeof(*domain), GFP_KERNEL);
523         if (!domain)
524                 return NULL;
525
526         spin_lock_init(&domain->lock);
527
528         return &domain->io_domain;
529 }
530
531 static void ipmmu_domain_free(struct iommu_domain *io_domain)
532 {
533         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
534
535         /*
536          * Free the domain resources. We assume that all devices have already
537          * been detached.
538          */
539         ipmmu_domain_destroy_context(domain);
540         free_io_pgtable_ops(domain->iop);
541         kfree(domain);
542 }
543
544 static int ipmmu_attach_device(struct iommu_domain *io_domain,
545                                struct device *dev)
546 {
547         struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
548         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
549         struct ipmmu_vmsa_device *mmu = priv ? priv->mmu : NULL;
550         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
551         unsigned long flags;
552         unsigned int i;
553         int ret = 0;
554
555         if (!mmu) {
556                 dev_err(dev, "Cannot attach to IPMMU\n");
557                 return -ENXIO;
558         }
559
560         spin_lock_irqsave(&domain->lock, flags);
561
562         if (!domain->mmu) {
563                 /* The domain hasn't been used yet, initialize it. */
564                 domain->mmu = mmu;
565                 ret = ipmmu_domain_init_context(domain);
566         } else if (domain->mmu != mmu) {
567                 /*
568                  * Something is wrong, we can't attach two devices using
569                  * different IOMMUs to the same domain.
570                  */
571                 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
572                         dev_name(mmu->dev), dev_name(domain->mmu->dev));
573                 ret = -EINVAL;
574         } else
575                 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
576
577         spin_unlock_irqrestore(&domain->lock, flags);
578
579         if (ret < 0)
580                 return ret;
581
582         for (i = 0; i < fwspec->num_ids; ++i)
583                 ipmmu_utlb_enable(domain, fwspec->ids[i]);
584
585         return 0;
586 }
587
588 static void ipmmu_detach_device(struct iommu_domain *io_domain,
589                                 struct device *dev)
590 {
591         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
592         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
593         unsigned int i;
594
595         for (i = 0; i < fwspec->num_ids; ++i)
596                 ipmmu_utlb_disable(domain, fwspec->ids[i]);
597
598         /*
599          * TODO: Optimize by disabling the context when no device is attached.
600          */
601 }
602
603 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
604                      phys_addr_t paddr, size_t size, int prot)
605 {
606         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
607
608         if (!domain)
609                 return -ENODEV;
610
611         return domain->iop->map(domain->iop, iova, paddr, size, prot);
612 }
613
614 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
615                           size_t size)
616 {
617         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
618
619         return domain->iop->unmap(domain->iop, iova, size);
620 }
621
622 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
623                                       dma_addr_t iova)
624 {
625         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
626
627         /* TODO: Is locking needed ? */
628
629         return domain->iop->iova_to_phys(domain->iop, iova);
630 }
631
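/*
 * Look up the IPMMU instance referenced by the DT phandle and attach its
 * driver data to the master device as iommu_priv.
 */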
632 static int ipmmu_init_platform_device(struct device *dev,
633                                       struct of_phandle_args *args)
634 {
635         struct platform_device *ipmmu_pdev;
636         struct ipmmu_vmsa_iommu_priv *priv;
637
638         ipmmu_pdev = of_find_device_by_node(args->np);
639         if (!ipmmu_pdev)
640                 return -ENODEV;
641
642         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
643         if (!priv)
644                 return -ENOMEM;
645
646         priv->mmu = platform_get_drvdata(ipmmu_pdev);
647         priv->dev = dev;
648         dev->iommu_fwspec->iommu_priv = priv;
649         return 0;
650 }
651
652 static int ipmmu_of_xlate(struct device *dev,
653                           struct of_phandle_args *spec)
654 {
655         iommu_fwspec_add_ids(dev, spec->args, 1);
656
657         /* Initialize once - xlate() may be called multiple times */
658         if (to_priv(dev))
659                 return 0;
660
661         return ipmmu_init_platform_device(dev, spec);
662 }
663
664 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
665
666 static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
667 {
668         if (type != IOMMU_DOMAIN_UNMANAGED)
669                 return NULL;
670
671         return __ipmmu_domain_alloc(type);
672 }
673
674 static int ipmmu_add_device(struct device *dev)
675 {
676         struct ipmmu_vmsa_device *mmu = NULL;
677         struct iommu_group *group;
678         int ret;
679
680         /*
681          * Only let through devices that have been verified in xlate()
682          */
683         if (!to_priv(dev))
684                 return -ENODEV;
685
686         /* Create a device group and add the device to it. */
687         group = iommu_group_alloc();
688         if (IS_ERR(group)) {
689                 dev_err(dev, "Failed to allocate IOMMU group\n");
690                 ret = PTR_ERR(group);
691                 goto error;
692         }
693
694         ret = iommu_group_add_device(group, dev);
695         iommu_group_put(group);
696
697         if (ret < 0) {
698                 dev_err(dev, "Failed to add device to IPMMU group\n");
699                 group = NULL;
700                 goto error;
701         }
702
703         /*
704          * Create the ARM mapping, used by the ARM DMA mapping core to allocate
705          * VAs. This will allocate a corresponding IOMMU domain.
706          *
707          * TODO:
708          * - Create one mapping per context (TLB).
709          * - Make the mapping size configurable ? We currently use a 2GB mapping
710          *   at a 1GB offset to ensure that NULL VAs will fault.
711          */
712         mmu = to_priv(dev)->mmu;
713         if (!mmu->mapping) {
714                 struct dma_iommu_mapping *mapping;
715
716                 mapping = arm_iommu_create_mapping(&platform_bus_type,
717                                                    SZ_1G, SZ_2G);
718                 if (IS_ERR(mapping)) {
719                         dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
720                         ret = PTR_ERR(mapping);
721                         goto error;
722                 }
723
724                 mmu->mapping = mapping;
725         }
726
727         /* Attach the ARM VA mapping to the device. */
728         ret = arm_iommu_attach_device(dev, mmu->mapping);
729         if (ret < 0) {
730                 dev_err(dev, "Failed to attach device to VA mapping\n");
731                 goto error;
732         }
733
734         return 0;
735
736 error:
737         if (mmu)
738                 arm_iommu_release_mapping(mmu->mapping);
739
740         if (!IS_ERR_OR_NULL(group))
741                 iommu_group_remove_device(dev);
742
743         return ret;
744 }
745
746 static void ipmmu_remove_device(struct device *dev)
747 {
748         arm_iommu_detach_device(dev);
749         iommu_group_remove_device(dev);
750 }
751
752 static const struct iommu_ops ipmmu_ops = {
753         .domain_alloc = ipmmu_domain_alloc,
754         .domain_free = ipmmu_domain_free,
755         .attach_dev = ipmmu_attach_device,
756         .detach_dev = ipmmu_detach_device,
757         .map = ipmmu_map,
758         .unmap = ipmmu_unmap,
759         .map_sg = default_iommu_map_sg,
760         .iova_to_phys = ipmmu_iova_to_phys,
761         .add_device = ipmmu_add_device,
762         .remove_device = ipmmu_remove_device,
763         .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
764         .of_xlate = ipmmu_of_xlate,
765 };
766
767 #endif /* CONFIG_ARM && !CONFIG_IOMMU_DMA */
768
769 #ifdef CONFIG_IOMMU_DMA
770
771 static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
772 static LIST_HEAD(ipmmu_slave_devices);
773
774 static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
775 {
776         struct iommu_domain *io_domain = NULL;
777
778         switch (type) {
779         case IOMMU_DOMAIN_UNMANAGED:
780                 io_domain = __ipmmu_domain_alloc(type);
781                 break;
782
783         case IOMMU_DOMAIN_DMA:
784                 io_domain = __ipmmu_domain_alloc(type);
785                 if (io_domain)
786                         iommu_get_dma_cookie(io_domain);
787                 break;
788         }
789
790         return io_domain;
791 }
792
793 static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
794 {
795         switch (io_domain->type) {
796         case IOMMU_DOMAIN_DMA:
797                 iommu_put_dma_cookie(io_domain);
798                 /* fall-through */
799         default:
800                 ipmmu_domain_free(io_domain);
801                 break;
802         }
803 }
804
805 static int ipmmu_add_device_dma(struct device *dev)
806 {
807         struct iommu_group *group;
808
809         /*
810          * Only let through devices that have been verified in xlate()
811          */
812         if (!to_priv(dev))
813                 return -ENODEV;
814
815         group = iommu_group_get_for_dev(dev);
816         if (IS_ERR(group))
817                 return PTR_ERR(group);
818
819         spin_lock(&ipmmu_slave_devices_lock);
820         list_add(&to_priv(dev)->list, &ipmmu_slave_devices);
821         spin_unlock(&ipmmu_slave_devices_lock);
822         return 0;
823 }
824
825 static void ipmmu_remove_device_dma(struct device *dev)
826 {
827         struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
828
829         spin_lock(&ipmmu_slave_devices_lock);
830         list_del(&priv->list);
831         spin_unlock(&ipmmu_slave_devices_lock);
832
833         iommu_group_remove_device(dev);
834 }
835
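/*
 * Find another slave device attached to the same IPMMU instance, if any, so
 * that both can be placed in the same IOMMU group.
 */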
836 static struct device *ipmmu_find_sibling_device(struct device *dev)
837 {
838         struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
839         struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL;
840         bool found = false;
841
842         spin_lock(&ipmmu_slave_devices_lock);
843
844         list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) {
845                 if (priv == sibling_priv)
846                         continue;
847                 if (sibling_priv->mmu == priv->mmu) {
848                         found = true;
849                         break;
850                 }
851         }
852
853         spin_unlock(&ipmmu_slave_devices_lock);
854
855         return found ? sibling_priv->dev : NULL;
856 }
857
858 static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
859 {
860         struct iommu_group *group;
861         struct device *sibling;
862
863         sibling = ipmmu_find_sibling_device(dev);
864         if (sibling)
865                 group = iommu_group_get(sibling);
866         if (!sibling || IS_ERR(group))
867                 group = generic_device_group(dev);
868
869         return group;
870 }
871
872 static const struct iommu_ops ipmmu_ops = {
873         .domain_alloc = ipmmu_domain_alloc_dma,
874         .domain_free = ipmmu_domain_free_dma,
875         .attach_dev = ipmmu_attach_device,
876         .detach_dev = ipmmu_detach_device,
877         .map = ipmmu_map,
878         .unmap = ipmmu_unmap,
879         .map_sg = default_iommu_map_sg,
880         .iova_to_phys = ipmmu_iova_to_phys,
881         .add_device = ipmmu_add_device_dma,
882         .remove_device = ipmmu_remove_device_dma,
883         .device_group = ipmmu_find_group_dma,
884         .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
885         .of_xlate = ipmmu_of_xlate,
886 };
887
888 #endif /* CONFIG_IOMMU_DMA */
889
890 /* -----------------------------------------------------------------------------
891  * Probe/remove and init
892  */
893
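/*
 * Clear IMCTR in each context register bank so that translation and
 * interrupt generation are disabled before the IPMMU is used.
 */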
894 static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
895 {
896         unsigned int i;
897
898         /* Disable all contexts. */
899         for (i = 0; i < 4; ++i)
900                 ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
901 }
902
903 static int ipmmu_probe(struct platform_device *pdev)
904 {
905         struct ipmmu_vmsa_device *mmu;
906         struct resource *res;
907         int irq;
908         int ret;
909
910         mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
911         if (!mmu) {
912                 dev_err(&pdev->dev, "cannot allocate device data\n");
913                 return -ENOMEM;
914         }
915
916         mmu->dev = &pdev->dev;
917         mmu->num_utlbs = 32;
918         spin_lock_init(&mmu->lock);
919         bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
920
921         /* Map I/O memory and request IRQ. */
922         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
923         mmu->base = devm_ioremap_resource(&pdev->dev, res);
924         if (IS_ERR(mmu->base))
925                 return PTR_ERR(mmu->base);
926
927         /*
928          * The IPMMU has two register banks, for secure and non-secure modes.
929          * The bank mapped at the beginning of the IPMMU address space
930          * corresponds to the running mode of the CPU. When running in secure
931          * mode the non-secure register bank is also available at an offset.
932          *
933          * Secure mode operation isn't clearly documented and is thus currently
934          * not implemented in the driver. Furthermore, preliminary tests of
935          * non-secure operation with the main register bank were not successful.
936          * Offset the registers base unconditionally to point to the non-secure
937          * alias space for now.
938          */
939         mmu->base += IM_NS_ALIAS_OFFSET;
940
941         irq = platform_get_irq(pdev, 0);
942         if (irq < 0) {
943                 dev_err(&pdev->dev, "no IRQ found\n");
944                 return irq;
945         }
946
947         ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
948                                dev_name(&pdev->dev), mmu);
949         if (ret < 0) {
950                 dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
951                 return ret;
952         }
953
954         ipmmu_device_reset(mmu);
955
956         ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
957                                      dev_name(&pdev->dev));
958         if (ret)
959                 return ret;
960
961         iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
962         iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode);
963
964         ret = iommu_device_register(&mmu->iommu);
965         if (ret)
966                 return ret;
967
968         /*
969          * We can't create the ARM mapping here as it requires the bus to have
970          * an IOMMU, which only happens when bus_set_iommu() is called in
971          * ipmmu_init() after the probe function returns.
972          */
973
974         platform_set_drvdata(pdev, mmu);
975
976         return 0;
977 }
978
979 static int ipmmu_remove(struct platform_device *pdev)
980 {
981         struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
982
983         iommu_device_sysfs_remove(&mmu->iommu);
984         iommu_device_unregister(&mmu->iommu);
985
986 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
987         arm_iommu_release_mapping(mmu->mapping);
988 #endif
989
990         ipmmu_device_reset(mmu);
991
992         return 0;
993 }
994
995 static const struct of_device_id ipmmu_of_ids[] = {
996         { .compatible = "renesas,ipmmu-vmsa", },
997         { }
998 };
999
1000 static struct platform_driver ipmmu_driver = {
1001         .driver = {
1002                 .name = "ipmmu-vmsa",
1003                 .of_match_table = of_match_ptr(ipmmu_of_ids),
1004         },
1005         .probe = ipmmu_probe,
1006         .remove = ipmmu_remove,
1007 };
1008
1009 static int __init ipmmu_init(void)
1010 {
1011         int ret;
1012
1013         ret = platform_driver_register(&ipmmu_driver);
1014         if (ret < 0)
1015                 return ret;
1016
1017         if (!iommu_present(&platform_bus_type))
1018                 bus_set_iommu(&platform_bus_type, &ipmmu_ops);
1019
1020         return 0;
1021 }
1022
1023 static void __exit ipmmu_exit(void)
1024 {
1025         platform_driver_unregister(&ipmmu_driver);
1026 }
1027
1028 subsys_initcall(ipmmu_init);
1029 module_exit(ipmmu_exit);
1030
1031 MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
1032 MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1033 MODULE_LICENSE("GPL v2");