// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

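/*
 * Unmap an IOVA range from the GPU address space in SZ_4K steps. Both iova
 * and size must be page aligned; the walk stops early if the backend fails
 * to unmap a page.
 */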
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
                                  unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        size_t pgsize = SZ_4K;

        if (!IS_ALIGNED(iova | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
                       iova, size, pgsize);
                return;
        }

        while (unmapped < size) {
                unmapped_page = context->global->ops->unmap(context, iova,
                                                            pgsize);
                if (!unmapped_page)
                        break;

                iova += unmapped_page;
                unmapped += unmapped_page;
        }
}

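/*
 * Map a physically contiguous range into the GPU address space in SZ_4K
 * steps. On failure the pages mapped so far are unmapped again, so the
 * operation is all-or-nothing.
 */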
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
{
        unsigned long orig_iova = iova;
        size_t pgsize = SZ_4K;
        size_t orig_size = size;
        int ret = 0;

        if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
                       iova, &paddr, size, pgsize);
                return -EINVAL;
        }

        while (size) {
                ret = context->global->ops->map(context, iova, paddr, pgsize,
                                                prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                etnaviv_context_unmap(context, orig_iova, orig_size - size);

        return ret;
}

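/*
 * Map all DMA segments of a scatter/gather table at consecutive device
 * addresses starting at iova. Bumping flush_seq tells the next submit
 * that stale MMU TLB entries need to be flushed. A partial mapping is
 * torn down again on failure.
 */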
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
                             struct sg_table *sgt, unsigned len, int prot)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i;
        int ret;

        if (!context || !sgt)
                return -EINVAL;

        for_each_sgtable_dma_sg(sgt, sg, i) {
                phys_addr_t pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);

                ret = etnaviv_context_map(context, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        context->flush_seq++;

        return 0;

fail:
        etnaviv_context_unmap(context, iova, da - iova);
        return ret;
}

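/*
 * Inverse of etnaviv_iommu_map(): walk the sg_table again and unmap each
 * DMA segment, then bump flush_seq to force a TLB flush on the next submit.
 */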
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
                                struct sg_table *sgt, unsigned len)
{
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sgtable_dma_sg(sgt, sg, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_context_unmap(context, da, bytes);

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }

        context->flush_seq++;
}

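/*
 * Tear down the MMU mapping of a GEM object and give its address range
 * back to the drm_mm allocator. Caller must hold context->lock.
 */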
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        lockdep_assert_held(&context->lock);

        etnaviv_iommu_unmap(context, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}

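/*
 * Reclaim an idle mapping so its address space can be reused: unmap it,
 * drop the context reference and unlink it from the context. Caller must
 * hold context->lock and the mapping must not be in use.
 */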
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_iommu_context *context = mapping->context;

        lockdep_assert_held(&context->lock);
        WARN_ON(mapping->use);

        etnaviv_iommu_remove_mapping(context, mapping);
        etnaviv_iommu_context_put(mapping->context);
        mapping->context = NULL;
        list_del_init(&mapping->mmu_node);
}

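/*
 * Allocate a free IOVA range of the given size. If the address space is
 * full, use a drm_mm eviction scan to find idle mappings that can be
 * reaped to make room, then retry the allocation in eviction mode.
 */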
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&context->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&context->mm, node,
                                                  size, 0, 0, 0, U64_MAX, mode);
                if (ret != -ENOSPC)
                        break;

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

                found = false;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &context->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clearing the context pointer prevents mapping_get from
                 * finding this mapping again.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_reap_mapping(m);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed; retry the allocation one more time.
                 */
        }

        return ret;
}

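/*
 * Insert a mapping at the exact address requested by userspace. Idle
 * mappings blocking the range are reaped; a range still pinned by an
 * in-flight job makes the request fail with -ENOSPC.
 */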
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
                                      struct drm_mm_node *node, size_t size,
                                      u64 va)
{
        struct etnaviv_vram_mapping *m, *n;
        struct drm_mm_node *scan_node;
        LIST_HEAD(scan_list);
        int ret;

        lockdep_assert_held(&context->lock);

        ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
                                          va + size, DRM_MM_INSERT_LOWEST);
        if (ret != -ENOSPC)
                return ret;

        /*
         * When we can't insert the node due to an existing mapping blocking
         * the address space, there are two possible reasons:
         * 1. Userspace genuinely messed up and tried to reuse address space
         * before the last job using this VMA has finished executing.
         * 2. The existing buffer mappings are idle, but the buffers are not
         * destroyed yet (likely due to being referenced by another context),
         * in which case the mappings will not be cleaned up and we must reap
         * them here to make space for the new mapping.
         */

        drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
                m = container_of(scan_node, struct etnaviv_vram_mapping,
                                 vram_node);

                if (m->use)
                        return -ENOSPC;

                list_add(&m->scan_node, &scan_list);
        }

        list_for_each_entry_safe(m, n, &scan_list, scan_node) {
                etnaviv_iommu_reap_mapping(m);
                list_del_init(&m->scan_node);
        }

        return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
                                           va + size, DRM_MM_INSERT_LOWEST);
}

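/*
 * Map a GEM object into the GPU address space. On the version 1 MMU a
 * contiguous buffer that fits into the 2 GiB linear window is accessed
 * through that window directly instead of going through the pagetables.
 */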
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping, u64 va)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&context->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (context->global->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        mapping->context = etnaviv_iommu_context_get(context);
                        list_add_tail(&mapping->mmu_node, &context->mappings);
                        ret = 0;
                        goto unlock;
                }
        }

        node = &mapping->vram_node;

        if (va)
                ret = etnaviv_iommu_insert_exact(context, node,
                                                 etnaviv_obj->base.size, va);
        else
                ret = etnaviv_iommu_find_iova(context, node,
                                              etnaviv_obj->base.size);
        if (ret < 0)
                goto unlock;

        mapping->iova = node->start;
        ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
                                ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                goto unlock;
        }

        mapping->context = etnaviv_iommu_context_get(context);
        list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
        mutex_unlock(&context->lock);

        return ret;
}

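/*
 * Drop the GPU mapping of a GEM object, unless another thread already
 * reaped it. Linear window mappings on the v1 MMU have no vram node, so
 * only pagetable-backed mappings are actually unmapped here.
 */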
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&context->lock);

        /* Bail if the mapping has been reaped by another thread */
        if (!mapping->context) {
                mutex_unlock(&context->lock);
                return;
        }

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &context->mm)
                etnaviv_iommu_remove_mapping(context, mapping);

        list_del(&mapping->mmu_node);
        mutex_unlock(&context->lock);
        etnaviv_iommu_context_put(context);
}

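/* kref release helper: unmap the cmdbuf suballoc and free the context. */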
static void etnaviv_iommu_context_free(struct kref *kref)
{
        struct etnaviv_iommu_context *context =
                container_of(kref, struct etnaviv_iommu_context, refcount);

        etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

        context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
        kref_put(&context->refcount, etnaviv_iommu_context_free);
}

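/*
 * Allocate a version specific MMU context and map the command buffer
 * suballocator into it. The v1 MMU requires command buffers to live in
 * the low 2 GiB linear window, so anything outside is rejected.
 */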
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
                           struct etnaviv_cmdbuf_suballoc *suballoc)
{
        struct etnaviv_iommu_context *ctx;
        int ret;

        if (global->version == ETNAVIV_IOMMU_V1)
                ctx = etnaviv_iommuv1_context_alloc(global);
        else
                ctx = etnaviv_iommuv2_context_alloc(global);

        if (!ctx)
                return NULL;

        ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
                                          global->memory_base);
        if (ret)
                goto out_free;

        if (global->version == ETNAVIV_IOMMU_V1 &&
            ctx->cmdbuf_mapping.iova > 0x80000000) {
                dev_err(global->dev,
                        "command buffer outside valid memory window\n");
                goto out_unmap;
        }

        return ctx;

out_unmap:
        etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
        global->ops->free(ctx);
        return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
                           struct etnaviv_iommu_context *context)
{
        context->global->ops->restore(gpu, context);
}

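/*
 * Get a GPU mapping for the command buffer suballocator region. The
 * mapping is refcounted, so concurrent users share a single iova.
 */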
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
                                  struct etnaviv_vram_mapping *mapping,
                                  u32 memory_base, dma_addr_t paddr,
                                  size_t size)
{
        mutex_lock(&context->lock);

        if (mapping->use > 0) {
                mapping->use++;
                mutex_unlock(&context->lock);
                return 0;
        }

        /*
         * For MMUv1 we don't add the suballoc region to the pagetables, as
         * those GPUs can only work with cmdbufs accessed through the linear
         * window. Instead we manufacture a mapping to make it look uniform
         * to the upper layers.
         */
        if (context->global->version == ETNAVIV_IOMMU_V1) {
                mapping->iova = paddr - memory_base;
        } else {
                struct drm_mm_node *node = &mapping->vram_node;
                int ret;

                ret = etnaviv_iommu_find_iova(context, node, size);
                if (ret < 0) {
                        mutex_unlock(&context->lock);
                        return ret;
                }

                mapping->iova = node->start;
                ret = etnaviv_context_map(context, node->start, paddr, size,
                                          ETNAVIV_PROT_READ);
                if (ret < 0) {
                        drm_mm_remove_node(node);
                        mutex_unlock(&context->lock);
                        return ret;
                }

                context->flush_seq++;
        }

        list_add_tail(&mapping->mmu_node, &context->mappings);
        mapping->use = 1;

        mutex_unlock(&context->lock);

        return 0;
}

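/*
 * Counterpart to etnaviv_iommu_get_suballoc_va(): drop one reference and
 * tear down the mapping once the last user is gone. The v1 MMU never put
 * the region into the pagetables, so there is nothing to unmap there.
 */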
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
                                   struct etnaviv_vram_mapping *mapping)
{
        struct drm_mm_node *node = &mapping->vram_node;

        mutex_lock(&context->lock);
        mapping->use--;

        if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
                mutex_unlock(&context->lock);
                return;
        }

        etnaviv_context_unmap(context, node->start, node->size);
        drm_mm_remove_node(node);
        mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
        return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
        context->global->ops->dump(context, buf);
}

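/*
 * Set up the per device global MMU state: detect the MMU version,
 * allocate the bad page (the target for faulting translations) and, for
 * the v2 MMU, the pagetable array. The state is shared between GPU cores
 * and refcounted via global->use.
 */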
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
        struct etnaviv_drm_private *priv = gpu->drm->dev_private;
        struct etnaviv_iommu_global *global;
        struct device *dev = gpu->drm->dev;

        if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
                version = ETNAVIV_IOMMU_V2;

        if (priv->mmu_global) {
                if (priv->mmu_global->version != version) {
                        dev_err(gpu->dev,
                                "MMU version doesn't match global version\n");
                        return -ENXIO;
                }

                priv->mmu_global->use++;
                return 0;
        }

        global = kzalloc(sizeof(*global), GFP_KERNEL);
        if (!global)
                return -ENOMEM;

        global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
                                            GFP_KERNEL);
        if (!global->bad_page_cpu)
                goto free_global;

        memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

        if (version == ETNAVIV_IOMMU_V2) {
                global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
                                                  &global->v2.pta_dma, GFP_KERNEL);
                if (!global->v2.pta_cpu)
                        goto free_bad_page;
        }

        global->dev = dev;
        global->version = version;
        global->use = 1;
        mutex_init(&global->lock);

        if (version == ETNAVIV_IOMMU_V1)
                global->ops = &etnaviv_iommuv1_ops;
        else
                global->ops = &etnaviv_iommuv2_ops;

        priv->mmu_global = global;

        return 0;

free_bad_page:
        dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
        kfree(global);

        return -ENOMEM;
}

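/*
 * Drop one reference to the global MMU state and free the DMA memory
 * once the last GPU core is gone.
 */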
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
        struct etnaviv_drm_private *priv = gpu->drm->dev_private;
        struct etnaviv_iommu_global *global = priv->mmu_global;

        if (--global->use > 0)
                return;

        if (global->v2.pta_cpu)
                dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
                            global->v2.pta_cpu, global->v2.pta_dma);

        if (global->bad_page_cpu)
                dma_free_wc(global->dev, SZ_4K,
                            global->bad_page_cpu, global->bad_page_dma);

        mutex_destroy(&global->lock);
        kfree(global);

        priv->mmu_global = NULL;
}