drm/etnaviv: move MMU context ref/unref into map/unmap_gem
[platform/kernel/linux-starfive.git] drivers/gpu/drm/etnaviv/etnaviv_mmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

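/*
 * Unmap an IOVA range in SZ_4K steps through the version specific
 * pagetable ops, stopping early if the backend fails to unmap a page.
 */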
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
                                  unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        size_t pgsize = SZ_4K;

        if (!IS_ALIGNED(iova | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
                       iova, size, pgsize);
                return;
        }

        while (unmapped < size) {
                unmapped_page = context->global->ops->unmap(context, iova,
                                                            pgsize);
                if (!unmapped_page)
                        break;

                iova += unmapped_page;
                unmapped += unmapped_page;
        }
}

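/*
 * Map a physically contiguous range into the context in SZ_4K steps.
 * The mapping is all-or-nothing: a failed page map unrolls everything
 * mapped so far.
 */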
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
{
        unsigned long orig_iova = iova;
        size_t pgsize = SZ_4K;
        size_t orig_size = size;
        int ret = 0;

        if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
                       iova, &paddr, size, pgsize);
                return -EINVAL;
        }

        while (size) {
                ret = context->global->ops->map(context, iova, paddr, pgsize,
                                                prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                etnaviv_context_unmap(context, orig_iova, orig_size - size);

        return ret;
}

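/*
 * Map a scatter-gather table into the context, one DMA segment at a
 * time, tearing down all segments mapped so far on error.
 */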
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
                             struct sg_table *sgt, unsigned len, int prot)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i;
        int ret;

        if (!context || !sgt)
                return -EINVAL;

        for_each_sgtable_dma_sg(sgt, sg, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = etnaviv_context_map(context, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        etnaviv_context_unmap(context, iova, da - iova);
        return ret;
}

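/* Remove all segments of a scatter-gather table from the context. */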
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
                                struct sg_table *sgt, unsigned len)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sgtable_dma_sg(sgt, sg, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_context_unmap(context, da, bytes);

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }
}

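/*
 * Drop a GEM object's pages from the pagetables and release its
 * address space node. Caller must hold context->lock.
 */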
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        lockdep_assert_held(&context->lock);

        etnaviv_iommu_unmap(context, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}

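/*
 * Find a free IOVA range of the requested size. If the address space
 * is full, try to evict unpinned mappings with the drm_mm eviction
 * scan and retry the allocation in DRM_MM_INSERT_EVICT mode.
 */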
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&context->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&context->mm, node,
                                                  size, 0, 0, 0, U64_MAX, mode);
                if (ret != -ENOSPC)
                        break;

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

                found = false;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &context->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent the mapping_get finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(context, m);
                        etnaviv_iommu_context_put(m->context);
                        m->context = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed, retry the allocation one more time.
                 */
        }

        return ret;
}

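/*
 * Reserve the exact range [va, va + size); fails if any part of it is
 * already in use.
 */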
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
                   struct drm_mm_node *node, size_t size, u64 va)
{
        lockdep_assert_held(&context->lock);

        return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
                                           va + size, DRM_MM_INSERT_LOWEST);
}

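/*
 * Map a GEM object into the context, taking a reference on the context
 * that is held for the lifetime of the mapping and dropped again in
 * etnaviv_iommu_unmap_gem(). Contiguous BOs inside the 2GB linear
 * window don't need a pagetable entry on MMUv1.
 */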
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping, u64 va)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&context->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (context->global->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        mapping->context = etnaviv_iommu_context_get(context);
                        list_add_tail(&mapping->mmu_node, &context->mappings);
                        ret = 0;
                        goto unlock;
                }
        }

        node = &mapping->vram_node;

        if (va)
                ret = etnaviv_iommu_insert_exact(context, node,
                                                 etnaviv_obj->base.size, va);
        else
                ret = etnaviv_iommu_find_iova(context, node,
                                              etnaviv_obj->base.size);
        if (ret < 0)
                goto unlock;

        mapping->iova = node->start;
        ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
                                ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                goto unlock;
        }

        mapping->context = etnaviv_iommu_context_get(context);
        list_add_tail(&mapping->mmu_node, &context->mappings);
        context->flush_seq++;
unlock:
        mutex_unlock(&context->lock);

        return ret;
}

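/*
 * Undo etnaviv_iommu_map_gem(): tear down the mapping unless it has
 * already been reaped, then drop the context reference taken at map
 * time.
 */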
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&context->lock);

        /* Bail if the mapping has been reaped by another thread */
        if (!mapping->context) {
                mutex_unlock(&context->lock);
                return;
        }

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &context->mm)
                etnaviv_iommu_remove_mapping(context, mapping);

        list_del(&mapping->mmu_node);
        context->flush_seq++;
        mutex_unlock(&context->lock);
        etnaviv_iommu_context_put(context);
}

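/* Release function called once the last context reference is dropped. */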
static void etnaviv_iommu_context_free(struct kref *kref)
{
        struct etnaviv_iommu_context *context =
                container_of(kref, struct etnaviv_iommu_context, refcount);

        etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

        context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
        kref_put(&context->refcount, etnaviv_iommu_context_free);
}

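/*
 * Allocate a context matching the global MMU version and map the
 * cmdbuf suballocator into it.
 */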
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
                           struct etnaviv_cmdbuf_suballoc *suballoc)
{
        struct etnaviv_iommu_context *ctx;
        int ret;

        if (global->version == ETNAVIV_IOMMU_V1)
                ctx = etnaviv_iommuv1_context_alloc(global);
        else
                ctx = etnaviv_iommuv2_context_alloc(global);

        if (!ctx)
                return NULL;

        ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
                                          global->memory_base);
        if (ret)
                goto out_free;

        if (global->version == ETNAVIV_IOMMU_V1 &&
            ctx->cmdbuf_mapping.iova > 0x80000000) {
                dev_err(global->dev,
                        "command buffer outside valid memory window\n");
                goto out_unmap;
        }

        return ctx;

out_unmap:
        etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
        global->ops->free(ctx);
        return NULL;
}

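/* Program this context's pagetables into the GPU. */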
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
                           struct etnaviv_iommu_context *context)
{
        context->global->ops->restore(gpu, context);
}

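/*
 * Map the cmdbuf suballocator region into the context. The mapping is
 * use-counted; only the first call actually establishes it.
 */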
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
                                  struct etnaviv_vram_mapping *mapping,
                                  u32 memory_base, dma_addr_t paddr,
                                  size_t size)
{
        mutex_lock(&context->lock);

        if (mapping->use > 0) {
                mapping->use++;
                mutex_unlock(&context->lock);
                return 0;
        }

        /*
         * For MMUv1 we don't add the suballoc region to the pagetables, as
         * those GPUs can only work with cmdbufs accessed through the linear
         * window. Instead we manufacture a mapping to make it look uniform
         * to the upper layers.
         */
        if (context->global->version == ETNAVIV_IOMMU_V1) {
                mapping->iova = paddr - memory_base;
        } else {
                struct drm_mm_node *node = &mapping->vram_node;
                int ret;

                ret = etnaviv_iommu_find_iova(context, node, size);
                if (ret < 0) {
                        mutex_unlock(&context->lock);
                        return ret;
                }

                mapping->iova = node->start;
                ret = etnaviv_context_map(context, node->start, paddr, size,
                                          ETNAVIV_PROT_READ);
                if (ret < 0) {
                        drm_mm_remove_node(node);
                        mutex_unlock(&context->lock);
                        return ret;
                }

                context->flush_seq++;
        }

        list_add_tail(&mapping->mmu_node, &context->mappings);
        mapping->use = 1;

        mutex_unlock(&context->lock);

        return 0;
}

void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
                                   struct etnaviv_vram_mapping *mapping)
{
        struct drm_mm_node *node = &mapping->vram_node;

        mutex_lock(&context->lock);
        mapping->use--;

        if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
                mutex_unlock(&context->lock);
                return;
        }

        etnaviv_context_unmap(context, node->start, node->size);
        drm_mm_remove_node(node);
        mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
        return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
        context->global->ops->dump(context, buf);
}

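/*
 * Set up the GPU-global MMU state: detect the MMU version, allocate
 * the shared bad page and, on MMUv2, the pagetable array (PTA), then
 * select the matching pagetable ops. A second GPU with the same MMU
 * version just bumps the use count.
 */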
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
        struct etnaviv_drm_private *priv = gpu->drm->dev_private;
        struct etnaviv_iommu_global *global;
        struct device *dev = gpu->drm->dev;

        if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
                version = ETNAVIV_IOMMU_V2;

        if (priv->mmu_global) {
                if (priv->mmu_global->version != version) {
                        dev_err(gpu->dev,
                                "MMU version doesn't match global version\n");
                        return -ENXIO;
                }

                priv->mmu_global->use++;
                return 0;
        }

        global = kzalloc(sizeof(*global), GFP_KERNEL);
        if (!global)
                return -ENOMEM;

        global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
                                            GFP_KERNEL);
        if (!global->bad_page_cpu)
                goto free_global;

        memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

        if (version == ETNAVIV_IOMMU_V2) {
                global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
                                                  &global->v2.pta_dma, GFP_KERNEL);
                if (!global->v2.pta_cpu)
                        goto free_bad_page;
        }

        global->dev = dev;
        global->version = version;
        global->use = 1;
        mutex_init(&global->lock);

        if (version == ETNAVIV_IOMMU_V1)
                global->ops = &etnaviv_iommuv1_ops;
        else
                global->ops = &etnaviv_iommuv2_ops;

        priv->mmu_global = global;

        return 0;

free_bad_page:
        dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
        kfree(global);

        return -ENOMEM;
}

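/* Drop one user of the global MMU state and free it with the last one. */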
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
        struct etnaviv_drm_private *priv = gpu->drm->dev_private;
        struct etnaviv_iommu_global *global = priv->mmu_global;

        if (--global->use > 0)
                return;

        if (global->v2.pta_cpu)
                dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
                            global->v2.pta_cpu, global->v2.pta_dma);

        if (global->bad_page_cpu)
                dma_free_wc(global->dev, SZ_4K,
                            global->bad_page_cpu, global->bad_page_dma);

        mutex_destroy(&global->lock);
        kfree(global);

        priv->mmu_global = NULL;
}