/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024
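
/*
 * MMUv2 walks a two-level pagetable on a 32-bit GPU virtual address:
 * bits 31:22 index the MTLB, bits 21:12 index one of up to 1024 STLBs
 * and bits 11:0 are the offset into a 4 KiB page, so a fully populated
 * pagetable covers 1024 * 1024 * SZ_4K = 4 GiB. For example, iova
 * 0x12345000 decomposes into MTLB entry 0x048 and STLB entry 0x345.
 */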

struct etnaviv_iommuv2_domain {
	struct etnaviv_iommu_domain base;
	/* P(age) T(able) A(rray) */
	u64 *pta_cpu;
	dma_addr_t pta_dma;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, base);
}
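
/*
 * STLBs are allocated lazily: a second level pagetable is only created
 * once the first mapping lands in the 4 MiB region covered by the
 * corresponding MTLB entry.
 */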
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
			    int stlb)
{
	if (etnaviv_domain->stlb_cpu[stlb])
		return 0;

	etnaviv_domain->stlb_cpu[stlb] =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!etnaviv_domain->stlb_cpu[stlb])
		return -ENOMEM;

	/* mark all entries invalid until a real mapping is installed */
	memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	/* hook the new STLB into the first level pagetable */
	etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
					 MMUv2_PTE_PRESENT;

	return 0;
}
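
/*
 * Map a single 4 KiB page: build a PTE from the physical address and
 * protection bits and install it in the STLB slot selected by the iova.
 */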
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
	if (ret)
		return ret;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}
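
/*
 * Unmapping rewrites the PTE as an exception entry; the STLB itself
 * stays allocated for reuse by later mappings.
 */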
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}
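
/*
 * Allocate the static parts of the pagetable: the scratch (bad) page
 * used as the safe address for faulting accesses, the PTA and the MTLB.
 * Each is a single 4 KiB page of DMA-coherent memory.
 */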
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i;

	/* allocate scratch page */
	etnaviv_domain->base.bad_page_cpu =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->base.bad_page_dma,
				     GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	/* fill the scratch page with an easily recognizable pattern */
	p = etnaviv_domain->base.bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
					       SZ_4K, &etnaviv_domain->pta_dma,
					       GFP_KERNEL);
	if (!etnaviv_domain->pta_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
						SZ_4K, &etnaviv_domain->mtlb_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	/* pre-populate the MTLB with exception entries */
	memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	return 0;

fail_mem:
	if (etnaviv_domain->base.bad_page_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->base.bad_page_cpu,
			    etnaviv_domain->base.bad_page_dma);

	if (etnaviv_domain->pta_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	return ret;
}

static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->base.bad_page_cpu,
		    etnaviv_domain->base.bad_page_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
				    etnaviv_domain->stlb_cpu[i],
				    etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}
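
/*
 * Size of a pagetable snapshot: the MTLB page plus one page for each
 * STLB that is currently present.
 */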
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	/*
	 * Only advance buf for STLBs that are actually copied out, so the
	 * amount written matches what dump_size accounted for.
	 */
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
			buf += SZ_4K;
		}
}
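
/*
 * Restore MMU state after a GPU reset. In non-secure mode the MTLB and
 * safe (scratch) page addresses are handed to the hardware through a
 * small command stream executed by the FE, then the MMU is enabled via
 * a register write.
 */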
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->base.bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
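
/*
 * In kernel-secure mode the pagetable base lives in the PTA: entry 0 is
 * pointed at the MTLB and the PTA load is triggered through the FE,
 * while the safe addresses are programmed via registers directly.
 */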
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)));

	etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
				     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}

static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_domain_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};
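
/*
 * Allocate and initialize a MMUv2 domain covering the full 4 GiB of GPU
 * virtual address space.
 */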
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = 0;
	domain->size = (u64)SZ_1G * 4;
	domain->ops = &etnaviv_iommuv2_ops;

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}