// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"
/* Eviction is enabled by default, but can be disabled at runtime for
 * testing on iommu combinations that have had less exposure to it:
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
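/*
 * Two flavors of reclaim: objects on the "dontneed" LRU (marked
 * MADV_DONTNEED by userspace) can be purged outright, while objects on
 * the "willneed" LRU can only be evicted to swap, and only when swap
 * space is actually available (see can_swap()).
 */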
static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}
static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}
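/*
 * The "count" half of the shrinker interface: report how many objects
 * could be reclaimed, so the core MM knows how much to ask for in the
 * subsequent scan.
 */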
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned count = priv->lru.dontneed.count;

	if (can_swap())
		count += priv->lru.willneed.count;

	return count;
}
static bool
purge(struct drm_gem_object *obj)
{
	if (!is_purgeable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_purge(obj);

	return true;
}
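/*
 * Evicting an object releases its backing pages back to shmem, where
 * the core MM can then write them out to swap.
 */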
static bool
evict(struct drm_gem_object *obj)
{
	if (is_unevictable(to_msm_bo(obj)))
		return false;

	if (msm_gem_active(obj))
		return false;

	msm_gem_evict(obj);

	return true;
}
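/*
 * The "active" variants below give outstanding GPU work a short, bounded
 * window to complete before reclaiming an object that is still busy.
 */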
static bool
wait_for_idle(struct drm_gem_object *obj)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(true);
	return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
}
static bool
active_purge(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return purge(obj);
}
static bool
active_evict(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;

	return evict(obj);
}
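/*
 * The "scan" half of the shrinker interface: try to actually reclaim up
 * to sc->nr_to_scan objects, working through the reclaim stages from
 * cheapest to most expensive.
 */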
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
		unsigned long remaining;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge,        true },
		{ &priv->lru.willneed, evict,        can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned long remaining = 0;
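	/*
	 * Walk the stages in order, skipping any stage whose pre-condition
	 * is not met, until the scan budget is exhausted:
	 */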
	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr,
					 &stages[i].remaining,
					 stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
		remaining += stages[i].remaining;
	}
	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}
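/*
 * Debugfs-only entry point to invoke the shrinker directly, so reclaim
 * can be exercised without waiting for real memory pressure.
 */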
#ifdef CONFIG_DEBUG_FS
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	unsigned long ret = SHRINK_STOP;

	fs_reclaim_acquire(GFP_KERNEL);
	ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif
/* Since we don't know any better, let's bail after a few objects and,
 * if necessary, the shrinker will be invoked again.  Seems better than
 * unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;
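/* Drop the kernel-side vmap of an object, if it has one that is unused. */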
static bool
vmap_shrink(struct drm_gem_object *obj)
{
	if (!is_vunmapable(to_msm_bo(obj)))
		return false;

	msm_gem_vunmap(obj);

	return true;
}
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_gem_lru *lrus[] = {
		&priv->lru.dontneed,
		&priv->lru.willneed,
		&priv->lru.pinned,
		NULL,
	};
	unsigned idx, unmapped = 0;
	unsigned long remaining = 0;
	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += drm_gem_lru_scan(lrus[idx],
					     vmap_shrink_limit - unmapped,
					     &remaining,
					     vmap_shrink);
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}
/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}