drm/etnaviv: mmuv2: allocate 2nd level page tables on demand
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

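/*
 * MMUv2 uses a two level page table to map a 32 bit (4 GiB) address
 * space: iova bits 31:22 index one of 1024 master TLB (MTLB) entries,
 * each covering 4 MiB, and iova bits 21:12 index one of 1024 slave TLB
 * (STLB) entries, each mapping a single 4 KiB page. For example, iova
 * 0x00c01000 decomposes into MTLB index 3 and STLB index 1.
 */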
#define MMUv2_PTE_PRESENT               BIT(0)
#define MMUv2_PTE_EXCEPTION             BIT(1)
#define MMUv2_PTE_WRITEABLE             BIT(2)

#define MMUv2_MTLB_MASK                 0xffc00000
#define MMUv2_MTLB_SHIFT                22
#define MMUv2_STLB_MASK                 0x003ff000
#define MMUv2_STLB_SHIFT                12

#define MMUv2_MAX_STLB_ENTRIES          1024

struct etnaviv_iommuv2_domain {
        struct etnaviv_iommu_domain base;
        /* P(age) T(able) A(rray) */
        u64 *pta_cpu;
        dma_addr_t pta_dma;
        /* M(aster) TLB aka first level pagetable */
        u32 *mtlb_cpu;
        dma_addr_t mtlb_dma;
        /* S(lave) TLB aka second level pagetable */
        u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
        dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
        return container_of(domain, struct etnaviv_iommuv2_domain, base);
}

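/*
 * Allocate the second level pagetable backing one MTLB slot on demand
 * and hook it up in the MTLB once it has been initialized to all
 * exception entries. Returns 0 if the STLB already exists or was
 * allocated successfully.
 */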
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
                            int stlb)
{
        if (etnaviv_domain->stlb_cpu[stlb])
                return 0;

        etnaviv_domain->stlb_cpu[stlb] =
                        dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
                                     &etnaviv_domain->stlb_dma[stlb],
                                     GFP_KERNEL);

        if (!etnaviv_domain->stlb_cpu[stlb])
                return -ENOMEM;

        memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
                 SZ_4K / sizeof(u32));

        etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
                                                      MMUv2_PTE_PRESENT;
        return 0;
}

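/*
 * Map a single 4 KiB page: split the iova into its MTLB and STLB
 * indices, make sure the STLB for that MTLB slot exists and then
 * write the PTE, marked present and optionally writeable.
 */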
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry, ret;
        u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

        if (size != SZ_4K)
                return -EINVAL;

        if (prot & ETNAVIV_PROT_WRITE)
                entry |= MMUv2_PTE_WRITEABLE;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
        if (ret)
                return ret;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

        return 0;
}

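/* Unmap a single 4 KiB page by resetting its PTE to the exception marker. */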
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
                                    unsigned long iova, size_t size)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;

        if (size != SZ_4K)
                return -EINVAL;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

        return SZ_4K;
}

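/*
 * Set up the static parts of the domain: the scratch (bad) page filled
 * with a recognizable 0xdead55aa pattern, the page table array and the
 * MTLB with all entries marked as exceptions. STLBs are only allocated
 * later, on demand, when a mapping first touches them.
 */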
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
        u32 *p;
        int ret, i;

        /* allocate scratch page */
        etnaviv_domain->base.bad_page_cpu =
                        dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
                                     &etnaviv_domain->base.bad_page_dma,
                                     GFP_KERNEL);
        if (!etnaviv_domain->base.bad_page_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }
        p = etnaviv_domain->base.bad_page_cpu;
        for (i = 0; i < SZ_4K / 4; i++)
                *p++ = 0xdead55aa;

        etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
                                               SZ_4K, &etnaviv_domain->pta_dma,
                                               GFP_KERNEL);
        if (!etnaviv_domain->pta_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }

        etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
                                                SZ_4K, &etnaviv_domain->mtlb_dma,
                                                GFP_KERNEL);
        if (!etnaviv_domain->mtlb_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }

        memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
                 MMUv2_MAX_STLB_ENTRIES);

        return 0;

fail_mem:
        if (etnaviv_domain->base.bad_page_cpu)
                dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
                            etnaviv_domain->base.bad_page_cpu,
                            etnaviv_domain->base.bad_page_dma);

        if (etnaviv_domain->pta_cpu)
                dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
                            etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

        if (etnaviv_domain->mtlb_cpu)
                dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
                            etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

        return ret;
}

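/*
 * Free the scratch page, the PTA, the MTLB and every STLB that was
 * allocated on demand.
 */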
static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
                    etnaviv_domain->base.bad_page_cpu,
                    etnaviv_domain->base.bad_page_dma);

        dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
                    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

        dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
                    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->stlb_cpu[i])
                        dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
                                    etnaviv_domain->stlb_cpu[i],
                                    etnaviv_domain->stlb_dma[i]);
        }

        vfree(etnaviv_domain);
}

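/*
 * A dump contains the 4 KiB MTLB plus one 4 KiB page for each STLB that
 * is actually present, so the size depends on the number of mappings.
 */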
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        size_t dump_size = SZ_4K;
        int i;

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
                        dump_size += SZ_4K;

        return dump_size;
}

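/* Copy the MTLB and each present STLB into the dump buffer. */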
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
        buf += SZ_4K;
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
                        memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}

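/*
 * Bring up the MMU in non-secure mode: hand the MTLB and scratch page
 * addresses to the hardware through a command stream executed by the
 * FE, then flip the enable bit. If the MMU is already enabled its
 * state is still intact and nothing needs to be done.
 */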
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(gpu->mmu->domain);
        u16 prefetch;

        /* If the MMU is already enabled the state is still there. */
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;

        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)etnaviv_domain->mtlb_dma,
                                (u32)etnaviv_domain->base.bad_page_dma);
        etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
                             prefetch);
        etnaviv_gpu_wait_idle(gpu, 100);

        gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

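/*
 * Bring up the MMU in kernel-secure mode: here the MTLB address is
 * handed to the hardware through entry 0 of the page table array (PTA),
 * which the FE is asked to load before the MMU is enabled. Both the
 * secure and non-secure safe (fault) addresses point at the scratch
 * page.
 */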
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                                to_etnaviv_domain(gpu->mmu->domain);
        u16 prefetch;

        /* If the MMU is already enabled the state is still there. */
        if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
                return;

        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
                  lower_32_bits(etnaviv_domain->pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
                  upper_32_bits(etnaviv_domain->pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

        gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
                  lower_32_bits(etnaviv_domain->base.bad_page_dma));
        gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
                  lower_32_bits(etnaviv_domain->base.bad_page_dma));
        gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
                  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
                  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
                  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
                  upper_32_bits(etnaviv_domain->base.bad_page_dma)));

        etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
                                     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

        /* trigger a PTA load through the FE */
        prefetch = etnaviv_buffer_config_pta(gpu);
        etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
                             prefetch);
        etnaviv_gpu_wait_idle(gpu, 100);

        gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

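/* Restore the MMU configuration matching the core's security mode. */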
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
        switch (gpu->sec_mode) {
        case ETNA_SEC_NONE:
                etnaviv_iommuv2_restore_nonsec(gpu);
                break;
        case ETNA_SEC_KERNEL:
                etnaviv_iommuv2_restore_sec(gpu);
                break;
        default:
                WARN(1, "unhandled GPU security mode\n");
                break;
        }
}

static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
        .free = etnaviv_iommuv2_domain_free,
        .map = etnaviv_iommuv2_map,
        .unmap = etnaviv_iommuv2_unmap,
        .dump_size = etnaviv_iommuv2_dump_size,
        .dump = etnaviv_iommuv2_dump,
};

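/*
 * Allocate and initialize an MMUv2 domain covering the full 4 GiB GPU
 * address space.
 */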
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain;
        struct etnaviv_iommu_domain *domain;
        int ret;

        etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
        if (!etnaviv_domain)
                return NULL;

        domain = &etnaviv_domain->base;

        domain->dev = gpu->dev;
        domain->base = 0;
        domain->size = (u64)SZ_1G * 4;
        domain->ops = &etnaviv_iommuv2_ops;

        ret = etnaviv_iommuv2_init(etnaviv_domain);
        if (ret)
                goto out_free;

        return &etnaviv_domain->base;

out_free:
        vfree(etnaviv_domain);
        return NULL;
}