drivers/iommu/ipmmu-vmsa.c
1 /*
2  * IPMMU VMSA
3  *
4  * Copyright (C) 2014 Renesas Electronics Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; version 2 of the License.
9  */
10
11 #include <linux/bitmap.h>
12 #include <linux/delay.h>
13 #include <linux/dma-iommu.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/err.h>
16 #include <linux/export.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iommu.h>
20 #include <linux/module.h>
21 #include <linux/of.h>
22 #include <linux/of_platform.h>
23 #include <linux/platform_device.h>
24 #include <linux/sizes.h>
25 #include <linux/slab.h>
26
27 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
28 #include <asm/dma-iommu.h>
29 #include <asm/pgalloc.h>
30 #endif
31
32 #include "io-pgtable.h"
33
34 #define IPMMU_CTX_MAX 1
35
36 struct ipmmu_vmsa_device {
37         struct device *dev;
38         void __iomem *base;
39         struct iommu_device iommu;
40
41         unsigned int num_utlbs;
42         spinlock_t lock;                        /* Protects ctx and domains[] */
43         DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
44         struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
45
46         struct dma_iommu_mapping *mapping;
47 };
48
49 struct ipmmu_vmsa_domain {
50         struct ipmmu_vmsa_device *mmu;
51         struct iommu_domain io_domain;
52
53         struct io_pgtable_cfg cfg;
54         struct io_pgtable_ops *iop;
55
56         unsigned int context_id;
57         spinlock_t lock;                        /* Protects mappings */
58 };
59
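/*
 * Per-device data, allocated the first time ipmmu_of_xlate() sees the device
 * and reachable afterwards through dev->iommu_fwspec->iommu_priv (see
 * to_priv() below). The list head links the device into the slave device
 * list used by the CONFIG_IOMMU_DMA group lookup code.
 */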
60 struct ipmmu_vmsa_iommu_priv {
61         struct ipmmu_vmsa_device *mmu;
62         struct device *dev;
63         struct list_head list;
64 };
65
66 static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
67 {
68         return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
69 }
70
71 static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
72 {
73         return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
74 }
75
76 #define TLB_LOOP_TIMEOUT                100     /* 100us */
77
78 /* -----------------------------------------------------------------------------
79  * Register Definitions
80  */
81
82 #define IM_NS_ALIAS_OFFSET              0x800
83
84 #define IM_CTX_SIZE                     0x40
85
86 #define IMCTR                           0x0000
87 #define IMCTR_TRE                       (1 << 17)
88 #define IMCTR_AFE                       (1 << 16)
89 #define IMCTR_RTSEL_MASK                (3 << 4)
90 #define IMCTR_RTSEL_SHIFT               4
91 #define IMCTR_TREN                      (1 << 3)
92 #define IMCTR_INTEN                     (1 << 2)
93 #define IMCTR_FLUSH                     (1 << 1)
94 #define IMCTR_MMUEN                     (1 << 0)
95
96 #define IMCAAR                          0x0004
97
98 #define IMTTBCR                         0x0008
99 #define IMTTBCR_EAE                     (1 << 31)
100 #define IMTTBCR_PMB                     (1 << 30)
101 #define IMTTBCR_SH1_NON_SHAREABLE       (0 << 28)
102 #define IMTTBCR_SH1_OUTER_SHAREABLE     (2 << 28)
103 #define IMTTBCR_SH1_INNER_SHAREABLE     (3 << 28)
104 #define IMTTBCR_SH1_MASK                (3 << 28)
105 #define IMTTBCR_ORGN1_NC                (0 << 26)
106 #define IMTTBCR_ORGN1_WB_WA             (1 << 26)
107 #define IMTTBCR_ORGN1_WT                (2 << 26)
108 #define IMTTBCR_ORGN1_WB                (3 << 26)
109 #define IMTTBCR_ORGN1_MASK              (3 << 26)
110 #define IMTTBCR_IRGN1_NC                (0 << 24)
111 #define IMTTBCR_IRGN1_WB_WA             (1 << 24)
112 #define IMTTBCR_IRGN1_WT                (2 << 24)
113 #define IMTTBCR_IRGN1_WB                (3 << 24)
114 #define IMTTBCR_IRGN1_MASK              (3 << 24)
115 #define IMTTBCR_TSZ1_MASK               (7 << 16)
116 #define IMTTBCR_TSZ1_SHIFT              16
117 #define IMTTBCR_SH0_NON_SHAREABLE       (0 << 12)
118 #define IMTTBCR_SH0_OUTER_SHAREABLE     (2 << 12)
119 #define IMTTBCR_SH0_INNER_SHAREABLE     (3 << 12)
120 #define IMTTBCR_SH0_MASK                (3 << 12)
121 #define IMTTBCR_ORGN0_NC                (0 << 10)
122 #define IMTTBCR_ORGN0_WB_WA             (1 << 10)
123 #define IMTTBCR_ORGN0_WT                (2 << 10)
124 #define IMTTBCR_ORGN0_WB                (3 << 10)
125 #define IMTTBCR_ORGN0_MASK              (3 << 10)
126 #define IMTTBCR_IRGN0_NC                (0 << 8)
127 #define IMTTBCR_IRGN0_WB_WA             (1 << 8)
128 #define IMTTBCR_IRGN0_WT                (2 << 8)
129 #define IMTTBCR_IRGN0_WB                (3 << 8)
130 #define IMTTBCR_IRGN0_MASK              (3 << 8)
131 #define IMTTBCR_SL0_LVL_2               (0 << 4)
132 #define IMTTBCR_SL0_LVL_1               (1 << 4)
133 #define IMTTBCR_TSZ0_MASK               (7 << 0)
134 #define IMTTBCR_TSZ0_SHIFT              0
135
136 #define IMBUSCR                         0x000c
137 #define IMBUSCR_DVM                     (1 << 2)
138 #define IMBUSCR_BUSSEL_SYS              (0 << 0)
139 #define IMBUSCR_BUSSEL_CCI              (1 << 0)
140 #define IMBUSCR_BUSSEL_IMCAAR           (2 << 0)
141 #define IMBUSCR_BUSSEL_CCI_IMCAAR       (3 << 0)
142 #define IMBUSCR_BUSSEL_MASK             (3 << 0)
143
144 #define IMTTLBR0                        0x0010
145 #define IMTTUBR0                        0x0014
146 #define IMTTLBR1                        0x0018
147 #define IMTTUBR1                        0x001c
148
149 #define IMSTR                           0x0020
150 #define IMSTR_ERRLVL_MASK               (3 << 12)
151 #define IMSTR_ERRLVL_SHIFT              12
152 #define IMSTR_ERRCODE_TLB_FORMAT        (1 << 8)
153 #define IMSTR_ERRCODE_ACCESS_PERM       (4 << 8)
154 #define IMSTR_ERRCODE_SECURE_ACCESS     (5 << 8)
155 #define IMSTR_ERRCODE_MASK              (7 << 8)
156 #define IMSTR_MHIT                      (1 << 4)
157 #define IMSTR_ABORT                     (1 << 2)
158 #define IMSTR_PF                        (1 << 1)
159 #define IMSTR_TF                        (1 << 0)
160
161 #define IMMAIR0                         0x0028
162 #define IMMAIR1                         0x002c
163 #define IMMAIR_ATTR_MASK                0xff
164 #define IMMAIR_ATTR_DEVICE              0x04
165 #define IMMAIR_ATTR_NC                  0x44
166 #define IMMAIR_ATTR_WBRWA               0xff
167 #define IMMAIR_ATTR_SHIFT(n)            ((n) << 3)
168 #define IMMAIR_ATTR_IDX_NC              0
169 #define IMMAIR_ATTR_IDX_WBRWA           1
170 #define IMMAIR_ATTR_IDX_DEV             2
171
172 #define IMEAR                           0x0030
173
174 #define IMPCTR                          0x0200
175 #define IMPSTR                          0x0208
176 #define IMPEAR                          0x020c
177 #define IMPMBA(n)                       (0x0280 + ((n) * 4))
178 #define IMPMBD(n)                       (0x02c0 + ((n) * 4))
179
180 #define IMUCTR(n)                       (0x0300 + ((n) * 16))
181 #define IMUCTR_FIXADDEN                 (1 << 31)
182 #define IMUCTR_FIXADD_MASK              (0xff << 16)
183 #define IMUCTR_FIXADD_SHIFT             16
184 #define IMUCTR_TTSEL_MMU(n)             ((n) << 4)
185 #define IMUCTR_TTSEL_PMB                (8 << 4)
186 #define IMUCTR_TTSEL_MASK               (15 << 4)
187 #define IMUCTR_FLUSH                    (1 << 1)
188 #define IMUCTR_MMUEN                    (1 << 0)
189
190 #define IMUASID(n)                      (0x0308 + ((n) * 16))
191 #define IMUASID_ASID8_MASK              (0xff << 8)
192 #define IMUASID_ASID8_SHIFT             8
193 #define IMUASID_ASID0_MASK              (0xff << 0)
194 #define IMUASID_ASID0_SHIFT             0
195
196 /* -----------------------------------------------------------------------------
197  * Read/Write Access
198  */
199
200 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
201 {
202         return ioread32(mmu->base + offset);
203 }
204
205 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
206                         u32 data)
207 {
208         iowrite32(data, mmu->base + offset);
209 }
210
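/*
 * Context registers are banked per translation context: context <n> occupies
 * an IM_CTX_SIZE (0x40 byte) window starting at n * IM_CTX_SIZE, so the
 * helpers below only take the per-context offset of the register.
 */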
211 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
212 {
213         return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
214 }
215
216 static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
217                             u32 data)
218 {
219         ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
220 }
221
222 /* -----------------------------------------------------------------------------
223  * TLB and microTLB Management
224  */
225
226 /* Wait for any pending TLB invalidations to complete */
227 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
228 {
229         unsigned int count = 0;
230
231         while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
232                 cpu_relax();
233                 if (++count == TLB_LOOP_TIMEOUT) {
234                         dev_err_ratelimited(domain->mmu->dev,
235                         "TLB sync timed out -- MMU may be deadlocked\n");
236                         return;
237                 }
238                 udelay(1);
239         }
240 }
241
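/*
 * Invalidate the whole TLB for the context: setting IMCTR_FLUSH starts the
 * flush, the hardware clears the bit again once the invalidation has
 * completed, and ipmmu_tlb_sync() above polls for that with an upper bound
 * of roughly TLB_LOOP_TIMEOUT microseconds.
 */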
242 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
243 {
244         u32 reg;
245
246         reg = ipmmu_ctx_read(domain, IMCTR);
247         reg |= IMCTR_FLUSH;
248         ipmmu_ctx_write(domain, IMCTR, reg);
249
250         ipmmu_tlb_sync(domain);
251 }
252
253 /*
254  * Enable MMU translation for the microTLB.
255  */
256 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
257                               unsigned int utlb)
258 {
259         struct ipmmu_vmsa_device *mmu = domain->mmu;
260
261         /*
262          * TODO: Reference-count the microTLB as several bus masters can be
263          * connected to the same microTLB.
264          */
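        /*
         * The utlb argument is the microTLB index taken from the bus
         * master's "iommus" DT property (fwspec->ids[] in the callers).
         */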
265
266         /* TODO: What should we set the ASID to ? */
267         ipmmu_write(mmu, IMUASID(utlb), 0);
268         /* TODO: Do we need to flush the microTLB ? */
269         ipmmu_write(mmu, IMUCTR(utlb),
270                     IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
271                     IMUCTR_MMUEN);
272 }
273
274 /*
275  * Disable MMU translation for the microTLB.
276  */
277 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
278                                unsigned int utlb)
279 {
280         struct ipmmu_vmsa_device *mmu = domain->mmu;
281
282         ipmmu_write(mmu, IMUCTR(utlb), 0);
283 }
284
285 static void ipmmu_tlb_flush_all(void *cookie)
286 {
287         struct ipmmu_vmsa_domain *domain = cookie;
288
289         ipmmu_tlb_invalidate(domain);
290 }
291
292 static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
293                                 size_t granule, bool leaf, void *cookie)
294 {
295         /* The hardware doesn't support selective TLB flush. */
296 }
297
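/*
 * io-pgtable TLB callbacks: as the IPMMU can only invalidate a whole
 * context, tlb_add_flush is a no-op and both tlb_flush_all and tlb_sync end
 * up performing a full flush followed by a wait for completion.
 */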
298 static const struct iommu_gather_ops ipmmu_gather_ops = {
299         .tlb_flush_all = ipmmu_tlb_flush_all,
300         .tlb_add_flush = ipmmu_tlb_add_flush,
301         .tlb_sync = ipmmu_tlb_flush_all,
302 };
303
304 /* -----------------------------------------------------------------------------
305  * Domain/Context Management
306  */
307
308 static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
309                                          struct ipmmu_vmsa_domain *domain)
310 {
311         unsigned long flags;
312         int ret;
313
314         spin_lock_irqsave(&mmu->lock, flags);
315
316         ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
317         if (ret != IPMMU_CTX_MAX) {
318                 mmu->domains[ret] = domain;
319                 set_bit(ret, mmu->ctx);
320         }
321
322         spin_unlock_irqrestore(&mmu->lock, flags);
323
324         return ret;
325 }
326
327 static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
328                                       unsigned int context_id)
329 {
330         unsigned long flags;
331
332         spin_lock_irqsave(&mmu->lock, flags);
333
334         clear_bit(context_id, mmu->ctx);
335         mmu->domains[context_id] = NULL;
336
337         spin_unlock_irqrestore(&mmu->lock, flags);
338 }
339
340 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
341 {
342         u64 ttbr;
343         int ret;
344
345         /*
346          * Allocate the page table operations.
347          *
348          * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
349          * access, Long-descriptor format" that the NStable bit being set in a
350          * table descriptor will result in the NStable and NS bits of all child
351          * entries being ignored and considered as being set. The IPMMU seems
352          * not to comply with this, as it generates a secure access page fault
353          * if any of the NStable and NS bits isn't set when running in
354          * non-secure mode.
355          */
356         domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
357         domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
358         domain->cfg.ias = 32;
359         domain->cfg.oas = 40;
360         domain->cfg.tlb = &ipmmu_gather_ops;
361         domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
362         domain->io_domain.geometry.force_aperture = true;
363         /*
364          * TODO: Add support for coherent walk through CCI with DVM and remove
365          * cache handling. For now, delegate it to the io-pgtable code.
366          */
367         domain->cfg.iommu_dev = domain->mmu->dev;
368
369         /*
370          * Find an unused context.
371          */
372         ret = ipmmu_domain_allocate_context(domain->mmu, domain);
373         if (ret == IPMMU_CTX_MAX)
374                 return -EBUSY;
375
376         domain->context_id = ret;
377
378         domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
379                                            domain);
380         if (!domain->iop) {
381                 ipmmu_domain_free_context(domain->mmu, domain->context_id);
382                 return -EINVAL;
383         }
384
385         /* TTBR0 */
386         ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
387         ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
388         ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
389
390         /*
391          * TTBCR
392          * We use long descriptors with inner-shareable WBWA tables and allocate
393          * the whole 32-bit VA space to TTBR0.
394          */
395         ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
396                         IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
397                         IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
398
399         /* MAIR0 */
400         ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
401
402         /* IMBUSCR */
403         ipmmu_ctx_write(domain, IMBUSCR,
404                         ipmmu_ctx_read(domain, IMBUSCR) &
405                         ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
406
407         /*
408          * IMSTR
409          * Clear all interrupt flags.
410          */
411         ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
412
413         /*
414          * IMCTR
415          * Enable the MMU and interrupt generation. The long-descriptor
416          * translation table format doesn't use TEX remapping. Don't enable AF
417          * software management as we have no use for it. Flush the TLB as
418          * required when modifying the context registers.
419          */
420         ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
421
422         return 0;
423 }
424
425 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
426 {
427         /*
428          * Disable the context. Flush the TLB as required when modifying the
429          * context registers.
430          *
431          * TODO: Is TLB flush really needed ?
432          */
433         ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
434         ipmmu_tlb_sync(domain);
435         ipmmu_domain_free_context(domain->mmu, domain->context_id);
436 }
437
438 /* -----------------------------------------------------------------------------
439  * Fault Handling
440  */
441
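/*
 * Per-context fault handling: IMSTR holds the error status and IMEAR the
 * faulting I/O virtual address. report_iommu_fault() gives a handler
 * installed with iommu_set_fault_handler() a chance to resolve page and
 * translation faults (by returning 0) before the fault is logged as
 * unhandled.
 */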
442 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
443 {
444         const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
445         struct ipmmu_vmsa_device *mmu = domain->mmu;
446         u32 status;
447         u32 iova;
448
449         status = ipmmu_ctx_read(domain, IMSTR);
450         if (!(status & err_mask))
451                 return IRQ_NONE;
452
453         iova = ipmmu_ctx_read(domain, IMEAR);
454
455         /*
456          * Clear the error status flags. Unlike traditional interrupt flag
457          * registers that must be cleared by writing 1, this status register
458          * seems to require 0. The error address register must be read before,
459          * otherwise its value will be 0.
460          */
461         ipmmu_ctx_write(domain, IMSTR, 0);
462
463         /* Log fatal errors. */
464         if (status & IMSTR_MHIT)
465                 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
466                                     iova);
467         if (status & IMSTR_ABORT)
468                 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
469                                     iova);
470
471         if (!(status & (IMSTR_PF | IMSTR_TF)))
472                 return IRQ_NONE;
473
474         /*
475          * Try to handle page faults and translation faults.
476          *
477          * TODO: We need to look up the faulty device based on the I/O VA. Use
478          * the IOMMU device for now.
479          */
480         if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
481                 return IRQ_HANDLED;
482
483         dev_err_ratelimited(mmu->dev,
484                             "Unhandled fault: status 0x%08x iova 0x%08x\n",
485                             status, iova);
486
487         return IRQ_HANDLED;
488 }
489
490 static irqreturn_t ipmmu_irq(int irq, void *dev)
491 {
492         struct ipmmu_vmsa_device *mmu = dev;
493         irqreturn_t status = IRQ_NONE;
494         unsigned int i;
495         unsigned long flags;
496
497         spin_lock_irqsave(&mmu->lock, flags);
498
499         /*
500          * Check interrupts for all active contexts.
501          */
502         for (i = 0; i < IPMMU_CTX_MAX; i++) {
503                 if (!mmu->domains[i])
504                         continue;
505                 if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
506                         status = IRQ_HANDLED;
507         }
508
509         spin_unlock_irqrestore(&mmu->lock, flags);
510
511         return status;
512 }
513
514 /* -----------------------------------------------------------------------------
515  * IOMMU Operations
516  */
517
518 static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
519 {
520         struct ipmmu_vmsa_domain *domain;
521
522         domain = kzalloc(sizeof(*domain), GFP_KERNEL);
523         if (!domain)
524                 return NULL;
525
526         spin_lock_init(&domain->lock);
527
528         return &domain->io_domain;
529 }
530
531 static void ipmmu_domain_free(struct iommu_domain *io_domain)
532 {
533         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
534
535         /*
536          * Free the domain resources. We assume that all devices have already
537          * been detached.
538          */
539         ipmmu_domain_destroy_context(domain);
540         free_io_pgtable_ops(domain->iop);
541         kfree(domain);
542 }
543
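/*
 * Attach a device to a domain: the first attach binds the domain to the
 * device's IPMMU instance and allocates a hardware context for it, later
 * attaches may only come from devices behind that same IPMMU, and finally
 * every microTLB listed in the device's fwspec is pointed at the context.
 */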
544 static int ipmmu_attach_device(struct iommu_domain *io_domain,
545                                struct device *dev)
546 {
547         struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
548         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
549         struct ipmmu_vmsa_device *mmu = priv ? priv->mmu : NULL;
550         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
551         unsigned long flags;
552         unsigned int i;
553         int ret = 0;
554
555         if (!mmu) {
556                 dev_err(dev, "Cannot attach to IPMMU\n");
557                 return -ENXIO;
558         }
559
560         spin_lock_irqsave(&domain->lock, flags);
561
562         if (!domain->mmu) {
563                 /* The domain hasn't been used yet, initialize it. */
564                 domain->mmu = mmu;
565                 ret = ipmmu_domain_init_context(domain);
566         } else if (domain->mmu != mmu) {
567                 /*
568                  * Something is wrong, we can't attach two devices using
569                  * different IOMMUs to the same domain.
570                  */
571                 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
572                         dev_name(mmu->dev), dev_name(domain->mmu->dev));
573                 ret = -EINVAL;
574         } else
575                 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
576
577         spin_unlock_irqrestore(&domain->lock, flags);
578
579         if (ret < 0)
580                 return ret;
581
582         for (i = 0; i < fwspec->num_ids; ++i)
583                 ipmmu_utlb_enable(domain, fwspec->ids[i]);
584
585         return 0;
586 }
587
588 static void ipmmu_detach_device(struct iommu_domain *io_domain,
589                                 struct device *dev)
590 {
591         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
592         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
593         unsigned int i;
594
595         for (i = 0; i < fwspec->num_ids; ++i)
596                 ipmmu_utlb_disable(domain, fwspec->ids[i]);
597
598         /*
599          * TODO: Optimize by disabling the context when no device is attached.
600          */
601 }
602
603 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
604                      phys_addr_t paddr, size_t size, int prot)
605 {
606         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
607
608         if (!domain)
609                 return -ENODEV;
610
611         return domain->iop->map(domain->iop, iova, paddr, size, prot);
612 }
613
614 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
615                           size_t size)
616 {
617         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
618
619         return domain->iop->unmap(domain->iop, iova, size);
620 }
621
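/*
 * ipmmu_unmap() above tears down page table entries without touching the
 * TLB (tlb_add_flush is a no-op), so flush_iotlb_all/iotlb_sync must do a
 * full flush of the context once it has actually been initialized.
 */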
622 static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
623 {
624         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
625
626         if (domain->mmu)
627                 ipmmu_tlb_flush_all(domain);
628 }
629
630 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
631                                       dma_addr_t iova)
632 {
633         struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
634
635         /* TODO: Is locking needed ? */
636
637         return domain->iop->iova_to_phys(domain->iop, iova);
638 }
639
640 static int ipmmu_init_platform_device(struct device *dev,
641                                       struct of_phandle_args *args)
642 {
643         struct platform_device *ipmmu_pdev;
644         struct ipmmu_vmsa_iommu_priv *priv;
645
646         ipmmu_pdev = of_find_device_by_node(args->np);
647         if (!ipmmu_pdev)
648                 return -ENODEV;
649
650         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
651         if (!priv)
652                 return -ENOMEM;
653
654         priv->mmu = platform_get_drvdata(ipmmu_pdev);
655         priv->dev = dev;
656         dev->iommu_fwspec->iommu_priv = priv;
657         return 0;
658 }
659
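/*
 * OF binding glue: each "iommus" specifier carries a single cell holding the
 * microTLB index of the bus master, so e.g. iommus = <&ipmmu_mx 18>; would
 * be recorded here as fwspec id 18 (the phandle and index are only an
 * illustration, the real values come from the SoC device tree).
 */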
660 static int ipmmu_of_xlate(struct device *dev,
661                           struct of_phandle_args *spec)
662 {
663         iommu_fwspec_add_ids(dev, spec->args, 1);
664
665         /* Initialize once - xlate() may be called multiple times */
666         if (to_priv(dev))
667                 return 0;
668
669         return ipmmu_init_platform_device(dev, spec);
670 }
671
672 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
673
674 static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
675 {
676         if (type != IOMMU_DOMAIN_UNMANAGED)
677                 return NULL;
678
679         return __ipmmu_domain_alloc(type);
680 }
681
682 static int ipmmu_add_device(struct device *dev)
683 {
684         struct ipmmu_vmsa_device *mmu = NULL;
685         struct iommu_group *group;
686         int ret;
687
688         /*
689          * Only let through devices that have been verified in xlate()
690          */
691         if (!to_priv(dev))
692                 return -ENODEV;
693
694         /* Create a device group and add the device to it. */
695         group = iommu_group_alloc();
696         if (IS_ERR(group)) {
697                 dev_err(dev, "Failed to allocate IOMMU group\n");
698                 ret = PTR_ERR(group);
699                 goto error;
700         }
701
702         ret = iommu_group_add_device(group, dev);
703         iommu_group_put(group);
704
705         if (ret < 0) {
706                 dev_err(dev, "Failed to add device to IPMMU group\n");
707                 group = NULL;
708                 goto error;
709         }
710
711         /*
712          * Create the ARM mapping, used by the ARM DMA mapping core to allocate
713          * VAs. This will allocate a corresponding IOMMU domain.
714          *
715          * TODO:
716          * - Create one mapping per context (TLB).
717          * - Make the mapping size configurable ? We currently use a 2GB mapping
718          *   at a 1GB offset to ensure that NULL VAs will fault.
719          */
720         mmu = to_priv(dev)->mmu;
721         if (!mmu->mapping) {
722                 struct dma_iommu_mapping *mapping;
723
724                 mapping = arm_iommu_create_mapping(&platform_bus_type,
725                                                    SZ_1G, SZ_2G);
726                 if (IS_ERR(mapping)) {
727                         dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
728                         ret = PTR_ERR(mapping);
729                         goto error;
730                 }
731
732                 mmu->mapping = mapping;
733         }
734
735         /* Attach the ARM VA mapping to the device. */
736         ret = arm_iommu_attach_device(dev, mmu->mapping);
737         if (ret < 0) {
738                 dev_err(dev, "Failed to attach device to VA mapping\n");
739                 goto error;
740         }
741
742         return 0;
743
744 error:
745         if (mmu)
746                 arm_iommu_release_mapping(mmu->mapping);
747
748         if (!IS_ERR_OR_NULL(group))
749                 iommu_group_remove_device(dev);
750
751         return ret;
752 }
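/*
 * Rough usage sketch for the path above (assuming a client platform device
 * that attached successfully): once arm_iommu_attach_device() has hooked the
 * device up to mmu->mapping, the regular DMA API goes through the IOMMU,
 * e.g. dma_map_single(dev, buf, size, DMA_TO_DEVICE) now returns an I/O
 * virtual address from the SZ_1G..SZ_1G+SZ_2G window instead of a physical
 * address, with the translation entered through ipmmu_map().
 */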
753
754 static void ipmmu_remove_device(struct device *dev)
755 {
756         arm_iommu_detach_device(dev);
757         iommu_group_remove_device(dev);
758 }
759
760 static const struct iommu_ops ipmmu_ops = {
761         .domain_alloc = ipmmu_domain_alloc,
762         .domain_free = ipmmu_domain_free,
763         .attach_dev = ipmmu_attach_device,
764         .detach_dev = ipmmu_detach_device,
765         .map = ipmmu_map,
766         .unmap = ipmmu_unmap,
767         .map_sg = default_iommu_map_sg,
768         .iova_to_phys = ipmmu_iova_to_phys,
769         .add_device = ipmmu_add_device,
770         .remove_device = ipmmu_remove_device,
771         .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
772         .of_xlate = ipmmu_of_xlate,
773 };
774
775 #endif /* CONFIG_ARM && !CONFIG_IOMMU_DMA */
776
777 #ifdef CONFIG_IOMMU_DMA
778
779 static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
780 static LIST_HEAD(ipmmu_slave_devices);
781
782 static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
783 {
784         struct iommu_domain *io_domain = NULL;
785
786         switch (type) {
787         case IOMMU_DOMAIN_UNMANAGED:
788                 io_domain = __ipmmu_domain_alloc(type);
789                 break;
790
791         case IOMMU_DOMAIN_DMA:
792                 io_domain = __ipmmu_domain_alloc(type);
793                 if (io_domain)
794                         iommu_get_dma_cookie(io_domain);
795                 break;
796         }
797
798         return io_domain;
799 }
800
801 static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
802 {
803         switch (io_domain->type) {
804         case IOMMU_DOMAIN_DMA:
805                 iommu_put_dma_cookie(io_domain);
806                 /* fall-through */
807         default:
808                 ipmmu_domain_free(io_domain);
809                 break;
810         }
811 }
812
813 static int ipmmu_add_device_dma(struct device *dev)
814 {
815         struct iommu_group *group;
816
817         /*
818          * Only let through devices that have been verified in xlate()
819          */
820         if (!to_priv(dev))
821                 return -ENODEV;
822
823         group = iommu_group_get_for_dev(dev);
824         if (IS_ERR(group))
825                 return PTR_ERR(group);
826
827         spin_lock(&ipmmu_slave_devices_lock);
828         list_add(&to_priv(dev)->list, &ipmmu_slave_devices);
829         spin_unlock(&ipmmu_slave_devices_lock);
830         return 0;
831 }
832
833 static void ipmmu_remove_device_dma(struct device *dev)
834 {
835         struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
836
837         spin_lock(&ipmmu_slave_devices_lock);
838         list_del(&priv->list);
839         spin_unlock(&ipmmu_slave_devices_lock);
840
841         iommu_group_remove_device(dev);
842 }
843
844 static struct device *ipmmu_find_sibling_device(struct device *dev)
845 {
846         struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
847         struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL;
848         bool found = false;
849
850         spin_lock(&ipmmu_slave_devices_lock);
851
852         list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) {
853                 if (priv == sibling_priv)
854                         continue;
855                 if (sibling_priv->mmu == priv->mmu) {
856                         found = true;
857                         break;
858                 }
859         }
860
861         spin_unlock(&ipmmu_slave_devices_lock);
862
863         return found ? sibling_priv->dev : NULL;
864 }
865
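/*
 * Group lookup for the generic DMA path: if another device behind the same
 * IPMMU instance is already registered, reuse its group, otherwise fall back
 * to a fresh group. The net effect is that all masters sharing one IPMMU end
 * up in a single iommu_group and therefore share one default domain/context.
 */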
866 static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
867 {
868         struct iommu_group *group;
869         struct device *sibling;
870
871         sibling = ipmmu_find_sibling_device(dev);
872         if (sibling)
873                 group = iommu_group_get(sibling);
874         if (!sibling || IS_ERR(group))
875                 group = generic_device_group(dev);
876
877         return group;
878 }
879
880 static const struct iommu_ops ipmmu_ops = {
881         .domain_alloc = ipmmu_domain_alloc_dma,
882         .domain_free = ipmmu_domain_free_dma,
883         .attach_dev = ipmmu_attach_device,
884         .detach_dev = ipmmu_detach_device,
885         .map = ipmmu_map,
886         .unmap = ipmmu_unmap,
887         .flush_iotlb_all = ipmmu_iotlb_sync,
888         .iotlb_sync = ipmmu_iotlb_sync,
889         .map_sg = default_iommu_map_sg,
890         .iova_to_phys = ipmmu_iova_to_phys,
891         .add_device = ipmmu_add_device_dma,
892         .remove_device = ipmmu_remove_device_dma,
893         .device_group = ipmmu_find_group_dma,
894         .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
895         .of_xlate = ipmmu_of_xlate,
896 };
897
898 #endif /* CONFIG_IOMMU_DMA */
899
900 /* -----------------------------------------------------------------------------
901  * Probe/remove and init
902  */
903
904 static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
905 {
906         unsigned int i;
907
908         /* Disable all contexts. */
909         for (i = 0; i < 4; ++i)
910                 ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
911 }
912
913 static int ipmmu_probe(struct platform_device *pdev)
914 {
915         struct ipmmu_vmsa_device *mmu;
916         struct resource *res;
917         int irq;
918         int ret;
919
920         mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
921         if (!mmu) {
922                 dev_err(&pdev->dev, "cannot allocate device data\n");
923                 return -ENOMEM;
924         }
925
926         mmu->dev = &pdev->dev;
927         mmu->num_utlbs = 32;
928         spin_lock_init(&mmu->lock);
929         bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
930
931         /* Map I/O memory and request IRQ. */
932         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
933         mmu->base = devm_ioremap_resource(&pdev->dev, res);
934         if (IS_ERR(mmu->base))
935                 return PTR_ERR(mmu->base);
936
937         /*
938          * The IPMMU has two register banks, for secure and non-secure modes.
939          * The bank mapped at the beginning of the IPMMU address space
940          * corresponds to the running mode of the CPU. When running in secure
941          * mode the non-secure register bank is also available at an offset.
942          *
943          * Secure mode operation isn't clearly documented and is thus currently
944          * not implemented in the driver. Furthermore, preliminary tests of
945          * non-secure operation with the main register bank were not successful.
946          * Offset the registers base unconditionally to point to the non-secure
947          * alias space for now.
948          */
949         mmu->base += IM_NS_ALIAS_OFFSET;
950
951         irq = platform_get_irq(pdev, 0);
952         if (irq < 0) {
953                 dev_err(&pdev->dev, "no IRQ found\n");
954                 return irq;
955         }
956
957         ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
958                                dev_name(&pdev->dev), mmu);
959         if (ret < 0) {
960                 dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
961                 return ret;
962         }
963
964         ipmmu_device_reset(mmu);
965
966         ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
967                                      dev_name(&pdev->dev));
968         if (ret)
969                 return ret;
970
971         iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
972         iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode);
973
974         ret = iommu_device_register(&mmu->iommu);
975         if (ret)
976                 return ret;
977
978         /*
979          * We can't create the ARM mapping here as it requires the bus to have
980          * an IOMMU, which only happens when bus_set_iommu() is called in
981          * ipmmu_init() after the probe function returns.
982          */
983
984         platform_set_drvdata(pdev, mmu);
985
986         return 0;
987 }
988
989 static int ipmmu_remove(struct platform_device *pdev)
990 {
991         struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
992
993         iommu_device_sysfs_remove(&mmu->iommu);
994         iommu_device_unregister(&mmu->iommu);
995
996 #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
997         arm_iommu_release_mapping(mmu->mapping);
998 #endif
999
1000         ipmmu_device_reset(mmu);
1001
1002         return 0;
1003 }
1004
1005 static const struct of_device_id ipmmu_of_ids[] = {
1006         { .compatible = "renesas,ipmmu-vmsa", },
1007         { }
1008 };
1009
1010 static struct platform_driver ipmmu_driver = {
1011         .driver = {
1012                 .name = "ipmmu-vmsa",
1013                 .of_match_table = of_match_ptr(ipmmu_of_ids),
1014         },
1015         .probe = ipmmu_probe,
1016         .remove = ipmmu_remove,
1017 };
1018
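/*
 * The driver is registered at subsys_initcall() time so that the platform
 * bus gains its IOMMU ops before client device drivers start probing.
 * bus_set_iommu() is skipped when another IOMMU driver has already claimed
 * the platform bus.
 */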
1019 static int __init ipmmu_init(void)
1020 {
1021         int ret;
1022
1023         ret = platform_driver_register(&ipmmu_driver);
1024         if (ret < 0)
1025                 return ret;
1026
1027         if (!iommu_present(&platform_bus_type))
1028                 bus_set_iommu(&platform_bus_type, &ipmmu_ops);
1029
1030         return 0;
1031 }
1032
1033 static void __exit ipmmu_exit(void)
1034 {
1035         platform_driver_unregister(&ipmmu_driver);
1036 }
1037
1038 subsys_initcall(ipmmu_init);
1039 module_exit(ipmmu_exit);
1040
1041 MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
1042 MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1043 MODULE_LICENSE("GPL v2");