// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"

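/*
 * A sketch of the typical lifecycle, as implied by the functions below
 * (caller-side flow is illustrative; error handling omitted):
 *
 *   aspace = msm_gem_address_space_create(mmu, "gpu", va_start, va_size);
 *   vma = msm_gem_vma_new(aspace);
 *   msm_gem_vma_init(vma, size, range_start, range_end);
 *   msm_gem_vma_map(vma, prot, sgt, size);
 *   ...
 *   msm_gem_vma_purge(vma);
 *   msm_gem_vma_close(vma);
 *   msm_gem_address_space_put(aspace);
 */

/*
 * kref release callback: tears down the iova allocator, the backing MMU
 * and the pid reference once the last reference is dropped
 */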
static void
msm_gem_address_space_destroy(struct kref *kref)
{
        struct msm_gem_address_space *aspace = container_of(kref,
                        struct msm_gem_address_space, kref);

        drm_mm_takedown(&aspace->mm);
        if (aspace->mmu)
                aspace->mmu->funcs->destroy(aspace->mmu);
        put_pid(aspace->pid);
        kfree(aspace);
}
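
/* Drop a reference; the address space is freed when the last reference goes away */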
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
        if (aspace)
                kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

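/* Take a reference, tolerating NULL and ERR_PTR() values from lookup paths */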
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
        if (!IS_ERR_OR_NULL(aspace))
                kref_get(&aspace->kref);

        return aspace;
}

/*
 * Actually unmap memory for the vma; the iova range itself remains
 * allocated until msm_gem_vma_close()
 */
void msm_gem_vma_purge(struct msm_gem_vma *vma)
{
        struct msm_gem_address_space *aspace = vma->aspace;
        unsigned size = vma->node.size;

        /* Don't do anything if the memory isn't mapped */
        if (!vma->mapped)
                return;

        aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

        vma->mapped = false;
}

/* Map and pin vma: */
int
msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
                struct sg_table *sgt, int size)
{
        struct msm_gem_address_space *aspace = vma->aspace;
        int ret;

        if (GEM_WARN_ON(!vma->iova))
                return -EINVAL;

        if (vma->mapped)
                return 0;

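        /* Mark as mapped up front; rolled back below if the MMU map fails */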
        vma->mapped = true;

        if (!aspace)
                return 0;

        /*
         * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
         * a lock across map/unmap which is also used in the job_run()
         * path, as this can cause deadlock in job_run() vs shrinker/
         * reclaim.
         *
         * Revisit this if we can come up with a scheme to pre-alloc pages
         * for the pgtable in map/unmap ops.
         */
        ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);

        if (ret)
                vma->mapped = false;

        return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_vma_close(struct msm_gem_vma *vma)
{
        struct msm_gem_address_space *aspace = vma->aspace;

        GEM_WARN_ON(vma->mapped);

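        /* The aspace spinlock serializes access to the drm_mm allocator */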
        spin_lock(&aspace->lock);
        if (vma->iova)
                drm_mm_remove_node(&vma->node);
        spin_unlock(&aspace->lock);

        vma->iova = 0;

        msm_gem_address_space_put(aspace);
}

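/* Allocate a vma tracking struct; the iova itself is allocated later, in msm_gem_vma_init() */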
struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
{
        struct msm_gem_vma *vma;

        vma = kzalloc(sizeof(*vma), GFP_KERNEL);
        if (!vma)
                return NULL;

        vma->aspace = aspace;

        return vma;
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
                u64 range_start, u64 range_end)
{
        struct msm_gem_address_space *aspace = vma->aspace;
        int ret;

        if (GEM_WARN_ON(!aspace))
                return -EINVAL;

        if (GEM_WARN_ON(vma->iova))
                return -EBUSY;

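        /* Carve a page-aligned iova range out of the space between range_start and range_end */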
        spin_lock(&aspace->lock);
        ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
                                          size, PAGE_SIZE, 0,
                                          range_start, range_end, 0);
        spin_unlock(&aspace->lock);

        if (ret)
                return ret;

        vma->iova = vma->node.start;
        vma->mapped = false;

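        /* The vma holds a reference on the address space, dropped in msm_gem_vma_close() */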
        kref_get(&aspace->kref);

        return 0;
}

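/*
 * Create an address space covering size bytes starting at va_start.
 * Takes ownership of the mmu, which is destroyed along with the address
 * space; an ERR_PTR() mmu is propagated back to the caller.
 */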
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
                u64 va_start, u64 size)
{
        struct msm_gem_address_space *aspace;

        if (IS_ERR(mmu))
                return ERR_CAST(mmu);

        aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
        if (!aspace)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&aspace->lock);
        aspace->name = name;
        aspace->mmu = mmu;
        aspace->va_start = va_start;
        aspace->va_size  = size;

        drm_mm_init(&aspace->mm, va_start, size);

        kref_init(&aspace->kref);

        return aspace;
}