/*
 * Source: commit 4ada190544434f37212fcf56232fc2f794dbedee
 * [platform/kernel/linux-rpi.git] drivers/gpu/drm/etnaviv/etnaviv_iommu.c
 */
1 /*
2  * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License version 2 as published by
6  * the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program.  If not, see <http://www.gnu.org/licenses/>.
15  */
16
17 #include <linux/platform_device.h>
18 #include <linux/sizes.h>
19 #include <linux/slab.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/bitops.h>
22
23 #include "etnaviv_gpu.h"
24 #include "etnaviv_mmu.h"
25 #include "etnaviv_iommu.h"
26 #include "state_hi.xml.h"
27
28 #define PT_SIZE         SZ_2M
29 #define PT_ENTRIES      (PT_SIZE / sizeof(u32))
30
31 #define GPU_MEM_START   0x80000000
32
/*
 * MMUv1 domain: wraps the generic etnaviv MMU domain and adds the
 * single flat page table used by the v1 memory controller MMU.
 */
struct etnaviv_iommuv1_domain {
	struct etnaviv_iommu_domain base;	/* generic domain (dev, aperture, bad page, ops) */
	u32 *pgtable_cpu;	/* kernel virtual address of the page table */
	dma_addr_t pgtable_dma;	/* bus address of the page table, programmed into the MC */
};
38
/* Convert a generic MMU domain pointer to its enclosing v1 domain. */
static struct etnaviv_iommuv1_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv1_domain, base);
}
44
45 static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
46 {
47         u32 *p;
48         int i;
49
50         etnaviv_domain->base.bad_page_cpu =
51                         dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
52                                      &etnaviv_domain->base.bad_page_dma,
53                                      GFP_KERNEL);
54         if (!etnaviv_domain->base.bad_page_cpu)
55                 return -ENOMEM;
56
57         p = etnaviv_domain->base.bad_page_cpu;
58         for (i = 0; i < SZ_4K / 4; i++)
59                 *p++ = 0xdead55aa;
60
61         etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
62                                                    PT_SIZE,
63                                                    &etnaviv_domain->pgtable_dma,
64                                                    GFP_KERNEL);
65         if (!etnaviv_domain->pgtable_cpu) {
66                 dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
67                             etnaviv_domain->base.bad_page_cpu,
68                             etnaviv_domain->base.bad_page_dma);
69                 return -ENOMEM;
70         }
71
72         memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
73                  PT_ENTRIES);
74
75         return 0;
76 }
77
78 static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
79 {
80         struct etnaviv_iommuv1_domain *etnaviv_domain =
81                         to_etnaviv_domain(domain);
82
83         dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
84                     etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
85
86         dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
87                     etnaviv_domain->base.bad_page_cpu,
88                     etnaviv_domain->base.bad_page_dma);
89
90         kfree(etnaviv_domain);
91 }
92
93 static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
94                                unsigned long iova, phys_addr_t paddr,
95                                size_t size, int prot)
96 {
97         struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
98         unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
99
100         if (size != SZ_4K)
101                 return -EINVAL;
102
103         etnaviv_domain->pgtable_cpu[index] = paddr;
104
105         return 0;
106 }
107
108 static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
109         unsigned long iova, size_t size)
110 {
111         struct etnaviv_iommuv1_domain *etnaviv_domain =
112                         to_etnaviv_domain(domain);
113         unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
114
115         if (size != SZ_4K)
116                 return -EINVAL;
117
118         etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
119
120         return SZ_4K;
121 }
122
/* Size in bytes of the state captured by etnaviv_iommuv1_dump():
 * the whole 2M page table. */
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
{
	return PT_SIZE;
}
127
128 static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
129 {
130         struct etnaviv_iommuv1_domain *etnaviv_domain =
131                         to_etnaviv_domain(domain);
132
133         memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
134 }
135
/*
 * Reprogram MMUv1 state into the hardware (used after GPU reset/resume):
 * the linear window base address for each engine (FE/TX/PE/PEZ/RA) and
 * the page table bus address for each MMU instance in the memory
 * controller.
 */
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u32 pgtable;

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* set page table address in MC; the MC registers are 32-bit, so
	 * the page table must have been allocated below 4G */
	pgtable = (u32)etnaviv_domain->pgtable_dma;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
158
/* MMUv1 backend operations, consumed by the generic etnaviv MMU layer. */
static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
	.free = etnaviv_iommuv1_domain_free,
	.map = etnaviv_iommuv1_map,
	.unmap = etnaviv_iommuv1_unmap,
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
};
166
167 struct etnaviv_iommu_domain *
168 etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
169 {
170         struct etnaviv_iommuv1_domain *etnaviv_domain;
171         struct etnaviv_iommu_domain *domain;
172         int ret;
173
174         etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
175         if (!etnaviv_domain)
176                 return NULL;
177
178         domain = &etnaviv_domain->base;
179
180         domain->dev = gpu->dev;
181         domain->base = GPU_MEM_START;
182         domain->size = PT_ENTRIES * SZ_4K;
183         domain->ops = &etnaviv_iommuv1_ops;
184
185         ret = __etnaviv_iommu_init(etnaviv_domain);
186         if (ret)
187                 goto out_free;
188
189         return &etnaviv_domain->base;
190
191 out_free:
192         kfree(etnaviv_domain);
193         return NULL;
194 }