/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT               BIT(0)
#define MMUv2_PTE_EXCEPTION             BIT(1)
#define MMUv2_PTE_WRITEABLE             BIT(2)

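/*
 * A 32 bit GPU virtual address is split into three fields: the top 10 bits
 * index the master TLB (MTLB), the next 10 bits index the slave TLB (STLB)
 * selected by the MTLB entry, and the low 12 bits are the offset into the
 * 4K page.
 */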
#define MMUv2_MTLB_MASK                 0xffc00000
#define MMUv2_MTLB_SHIFT                22
#define MMUv2_STLB_MASK                 0x003ff000
#define MMUv2_STLB_SHIFT                12

#define MMUv2_MAX_STLB_ENTRIES          1024

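/*
 * Each of the 1024 MTLB entries points to one STLB page holding 1024 32 bit
 * PTEs, so a fully populated pagetable spans 1024 * 1024 * 4K = 4 GiB of GPU
 * virtual address space.
 */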
struct etnaviv_iommuv2_domain {
        struct etnaviv_iommu_domain base;
        /* P(age) T(able) A(rray) */
        u64 *pta_cpu;
        dma_addr_t pta_dma;
        /* M(aster) TLB aka first level pagetable */
        u32 *mtlb_cpu;
        dma_addr_t mtlb_dma;
        /* S(lave) TLB aka second level pagetable */
        u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
        dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
        return container_of(domain, struct etnaviv_iommuv2_domain, base);
}

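/*
 * Map a single 4K page: split the iova into its MTLB and STLB indices and
 * write the PTE into the pre-allocated second level pagetable.
 */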
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;
        u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

        if (size != SZ_4K)
                return -EINVAL;

        if (prot & ETNAVIV_PROT_WRITE)
                entry |= MMUv2_PTE_WRITEABLE;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

        return 0;
}

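/*
 * Unmap a single 4K page by replacing its PTE with an exception entry.
 * A failed unmap reports 0 bytes, as the size_t return cannot carry an
 * errno. Faulting accesses to unmapped pages presumably end up at the
 * safe (bad page) address programmed at restore time.
 */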
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
                                    unsigned long iova, size_t size)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int mtlb_entry, stlb_entry;

        if (size != SZ_4K)
                return 0;

        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

        etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

        return SZ_4K;
}

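/*
 * Allocate the static parts of the pagetable hierarchy: a scratch page
 * filled with a recognizable 0xdead55aa marker, the page table array (PTA)
 * used in secure mode, the MTLB and all 1024 STLB pages. The STLBs start
 * out filled with exception PTEs.
 */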
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
        u32 *p;
        int ret, i, j;

        /* allocate scratch page */
        etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
                                                etnaviv_domain->base.dev,
                                                SZ_4K,
                                                &etnaviv_domain->base.bad_page_dma,
                                                GFP_KERNEL);
        if (!etnaviv_domain->base.bad_page_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }
        p = etnaviv_domain->base.bad_page_cpu;
        for (i = 0; i < SZ_4K / 4; i++)
                *p++ = 0xdead55aa;

        etnaviv_domain->pta_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
                                                     SZ_4K,
                                                     &etnaviv_domain->pta_dma,
                                                     GFP_KERNEL);
        if (!etnaviv_domain->pta_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }

        etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
                                                  SZ_4K,
                                                  &etnaviv_domain->mtlb_dma,
                                                  GFP_KERNEL);
        if (!etnaviv_domain->mtlb_cpu) {
                ret = -ENOMEM;
                goto fail_mem;
        }

        /* pre-populate STLB pages (may want to switch to on-demand later) */
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                etnaviv_domain->stlb_cpu[i] =
                                dma_alloc_coherent(etnaviv_domain->base.dev,
                                                   SZ_4K,
                                                   &etnaviv_domain->stlb_dma[i],
                                                   GFP_KERNEL);
                if (!etnaviv_domain->stlb_cpu[i]) {
                        ret = -ENOMEM;
                        goto fail_mem;
                }
                p = etnaviv_domain->stlb_cpu[i];
                for (j = 0; j < SZ_4K / 4; j++)
                        *p++ = MMUv2_PTE_EXCEPTION;

                etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
                                              MMUv2_PTE_PRESENT;
        }

        return 0;

fail_mem:
        if (etnaviv_domain->base.bad_page_cpu)
                dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                                  etnaviv_domain->base.bad_page_cpu,
                                  etnaviv_domain->base.bad_page_dma);

        if (etnaviv_domain->pta_cpu)
                dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                                  etnaviv_domain->pta_cpu,
                                  etnaviv_domain->pta_dma);

        if (etnaviv_domain->mtlb_cpu)
                dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                                  etnaviv_domain->mtlb_cpu,
                                  etnaviv_domain->mtlb_dma);

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->stlb_cpu[i])
                        dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                                          etnaviv_domain->stlb_cpu[i],
                                          etnaviv_domain->stlb_dma[i]);
        }

        return ret;
}

static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                          etnaviv_domain->base.bad_page_cpu,
                          etnaviv_domain->base.bad_page_dma);

        dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                          etnaviv_domain->pta_cpu,
                          etnaviv_domain->pta_dma);

        dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                          etnaviv_domain->mtlb_cpu,
                          etnaviv_domain->mtlb_dma);

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
                if (etnaviv_domain->stlb_cpu[i])
                        dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
                                          etnaviv_domain->stlb_cpu[i],
                                          etnaviv_domain->stlb_dma[i]);
        }

        vfree(etnaviv_domain);
}

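/*
 * The dump covers the MTLB page plus one page for every STLB marked
 * present; etnaviv_iommuv2_dump() emits the pages in the same order, so
 * both functions must agree on which STLBs are counted.
 */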
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        size_t dump_size = SZ_4K;
        int i;

        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
                        dump_size += SZ_4K;

        return dump_size;
}

static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(domain);
        int i;

        memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
        buf += SZ_4K;
        /* only advance the buffer for STLBs that are actually dumped,
         * so the output matches etnaviv_iommuv2_dump_size() */
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
                if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
                        memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
                        buf += SZ_4K;
                }
}

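/*
 * Non-secure restore: program the MTLB base and the safe (bad page)
 * address through a short FE command stream, wait for the FE to go idle
 * and then enable the MMU.
 */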
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                        to_etnaviv_domain(gpu->mmu->domain);
        u16 prefetch;

        /* If the MMU is already enabled the state is still there. */
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;

        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)etnaviv_domain->mtlb_dma,
                                (u32)etnaviv_domain->base.bad_page_dma);
        etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
                             prefetch);
        etnaviv_gpu_wait_idle(gpu, 100);

        gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

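/*
 * Secure restore: program the PTA base and the safe addresses directly
 * via registers, point PTA entry 0 at the MTLB in 4K mode and trigger a
 * PTA load through the FE before enabling the secure MMU control.
 */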
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain =
                                to_etnaviv_domain(gpu->mmu->domain);
        u16 prefetch;

        /* If the MMU is already enabled the state is still there. */
        if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
                return;

        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
                  lower_32_bits(etnaviv_domain->pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
                  upper_32_bits(etnaviv_domain->pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

        gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
                  lower_32_bits(etnaviv_domain->base.bad_page_dma));
        gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
                  lower_32_bits(etnaviv_domain->base.bad_page_dma));
        gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
                  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
                  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
                  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
                  upper_32_bits(etnaviv_domain->base.bad_page_dma)));

        etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
                                     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

        /* trigger a PTA load through the FE */
        prefetch = etnaviv_buffer_config_pta(gpu);
        etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
                             prefetch);
        etnaviv_gpu_wait_idle(gpu, 100);

        gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
        switch (gpu->sec_mode) {
        case ETNA_SEC_NONE:
                etnaviv_iommuv2_restore_nonsec(gpu);
                break;
        case ETNA_SEC_KERNEL:
                etnaviv_iommuv2_restore_sec(gpu);
                break;
        default:
                WARN(1, "unhandled GPU security mode\n");
                break;
        }
}

static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
        .free = etnaviv_iommuv2_domain_free,
        .map = etnaviv_iommuv2_map,
        .unmap = etnaviv_iommuv2_unmap,
        .dump_size = etnaviv_iommuv2_dump_size,
        .dump = etnaviv_iommuv2_dump,
};

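/*
 * Allocate a MMUv2 domain covering the full 4 GiB of GPU virtual address
 * space. The domain struct is vzalloc'ed, presumably because the embedded
 * STLB bookkeeping arrays make it too large for a kmalloc'ed allocation
 * to be guaranteed.
 */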
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
        struct etnaviv_iommuv2_domain *etnaviv_domain;
        struct etnaviv_iommu_domain *domain;
        int ret;

        etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
        if (!etnaviv_domain)
                return NULL;

        domain = &etnaviv_domain->base;

        domain->dev = gpu->dev;
        domain->base = 0;
        domain->size = (u64)SZ_1G * 4;
        domain->ops = &etnaviv_iommuv2_ops;

        ret = etnaviv_iommuv2_init(etnaviv_domain);
        if (ret)
                goto out_free;

        return &etnaviv_domain->base;

out_free:
        vfree(etnaviv_domain);
        return NULL;
}