[platform/kernel/linux-rpi.git] drivers/gpu/drm/msm/msm_gem_shrinker.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Enabled by default, with a module param to disable eviction on iommu
 * combinations paired with the driver where it still needs more testing:
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
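
/*
 * A minimal usage sketch (not part of this file): with 0600 permissions the
 * param can also be flipped at runtime through the standard module_param
 * sysfs interface, e.g. assuming the driver is built as the "msm" module:
 *
 *   echo 0 > /sys/module/msm/parameters/enable_eviction
 */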

static bool can_swap(void)
{
        return enable_eviction && get_nr_swap_pages() > 0;
}

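/*
 * Can the shrinker sleep (e.g. in wait_for_idle())?  Only if the caller's
 * allocation context permits direct reclaim.  Note that __GFP_RECLAIM
 * includes __GFP_DIRECT_RECLAIM, so once the first check passes the second
 * condition always holds; the || spells out the kswapd case explicitly.
 */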
static bool can_block(struct shrink_control *sc)
{
        if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
                return false;
        return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}

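/*
 * Count how many objects are potentially reclaimable: purgeable
 * (MADV_DONTNEED) objects always qualify, evictable (willneed) objects
 * only when their backing pages could be swapped out.
 */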
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
        unsigned count = priv->lru.dontneed.count;

        if (can_swap())
                count += priv->lru.willneed.count;

        return count;
}

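/* Purge a single object, but only if it is idle and marked purgeable. */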
static bool
purge(struct drm_gem_object *obj)
{
        if (!is_purgeable(to_msm_bo(obj)))
                return false;

        if (msm_gem_active(obj))
                return false;

        msm_gem_purge(obj);

        return true;
}

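/* Evict a single object's backing pages, but only if it is idle and evictable. */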
static bool
evict(struct drm_gem_object *obj)
{
        if (is_unevictable(to_msm_bo(obj)))
                return false;

        if (msm_gem_active(obj))
                return false;

        msm_gem_evict(obj);

        return true;
}

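/*
 * Wait up to 1000 jiffies for pending access to the object to complete;
 * returns true if it became idle within the timeout.
 */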
static bool
wait_for_idle(struct drm_gem_object *obj)
{
        enum dma_resv_usage usage = dma_resv_usage_rw(true);

        return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
}

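/* Like purge(), but willing to wait for an active object to become idle. */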
static bool
active_purge(struct drm_gem_object *obj)
{
        if (!wait_for_idle(obj))
                return false;

        return purge(obj);
}

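/* Like evict(), but willing to wait for an active object to become idle. */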
static bool
active_evict(struct drm_gem_object *obj)
{
        if (!wait_for_idle(obj))
                return false;

        return evict(obj);
}

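/*
 * Scan callback: walk the reclaim stages in order, stopping once enough
 * objects have been freed.  Skipped stages leave their counters zeroed by
 * the array initializer, so the trace below is safe either way.
 */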
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
        struct {
                struct drm_gem_lru *lru;
                bool (*shrink)(struct drm_gem_object *obj);
                bool cond;
                unsigned long freed;
                unsigned long remaining;
        } stages[] = {
                /* Stages of progressively more aggressive/expensive reclaim: */
                { &priv->lru.dontneed, purge,        true },
                { &priv->lru.willneed, evict,        can_swap() },
                { &priv->lru.dontneed, active_purge, can_block(sc) },
                { &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
        };
        long nr = sc->nr_to_scan;
        unsigned long freed = 0;
        unsigned long remaining = 0;

        for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
                if (!stages[i].cond)
                        continue;
                stages[i].freed =
                        drm_gem_lru_scan(stages[i].lru, nr,
                                         &stages[i].remaining,
                                         stages[i].shrink);
                nr -= stages[i].freed;
                freed += stages[i].freed;
                remaining += stages[i].remaining;
        }

        if (freed) {
                trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
                                     stages[1].freed, stages[2].freed,
                                     stages[3].freed);
        }

        return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}

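/*
 * Debugfs hook to force the shrinker by hand.  Note that sc.gfp_mask is
 * left zero, so can_block() is false and the active_* stages are skipped.
 */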
#ifdef CONFIG_DEBUG_FS
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct shrink_control sc = {
                .nr_to_scan = nr_to_scan,
        };
        unsigned long ret;

        fs_reclaim_acquire(GFP_KERNEL);
        ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
        fs_reclaim_release(GFP_KERNEL);

        return ret;
}
#endif

/* Since we don't know any better, let's bail after unmapping a few objects;
 * if necessary the shrinker will be invoked again.  Seems better than
 * unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;

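/* Tear down an object's kernel-side vmap, if it is currently safe to do so. */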
static bool
vmap_shrink(struct drm_gem_object *obj)
{
        if (!is_vunmapable(to_msm_bo(obj)))
                return false;

        msm_gem_vunmap(obj);

        return true;
}

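/*
 * Notifier invoked when vmalloc address space is running low: walk all of
 * the LRUs (including pinned objects, whose unused vmaps can still be torn
 * down) and unmap up to vmap_shrink_limit objects, reporting the count back
 * through *ptr.
 */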
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct msm_drm_private *priv =
                container_of(nb, struct msm_drm_private, vmap_notifier);
        struct drm_gem_lru *lrus[] = {
                &priv->lru.dontneed,
                &priv->lru.willneed,
                &priv->lru.pinned,
                NULL,
        };
        unsigned idx, unmapped = 0;
        unsigned long remaining = 0;

        for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
                unmapped += drm_gem_lru_scan(lrus[idx],
                                             vmap_shrink_limit - unmapped,
                                             &remaining,
                                             vmap_shrink);
        }

        *(unsigned long *)ptr += unmapped;

        if (unmapped > 0)
                trace_msm_gem_purge_vmaps(unmapped);

        return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

        priv->shrinker.count_objects = msm_gem_shrinker_count;
        priv->shrinker.scan_objects = msm_gem_shrinker_scan;
        priv->shrinker.seeks = DEFAULT_SEEKS;
        WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));

        priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

        if (priv->shrinker.nr_deferred) {
                WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
                unregister_shrinker(&priv->shrinker);
        }
}