From: Chris Wilson
Date: Fri, 13 Oct 2017 20:26:19 +0000 (+0100)
Subject: drm/i915: Set our shrinker->batch to 4096 (~16MiB)
X-Git-Tag: v4.19~298^2~46^2~1323
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c5418a8b38a4ff9284e8e59c06a24f443197131c;p=platform%2Fkernel%2Flinux-rpi3.git

drm/i915: Set our shrinker->batch to 4096 (~16MiB)

Prefer to defer activating our GEM shrinker until we have a few
megabytes to free; or we have accumulated sufficient mempressure by
deferring the reclaim to force a shrink. The intent is that because our
objects may typically be large, we are too effective at shrinking and
are not rewarded for freeing more pages than the batch. It will also
defer the initial shrinking to hopefully put it at a lower priority than,
say, the buffer cache (although it will balance out over a number of
reclaims, with GEM being more bursty).

v2: Give it a feedback system to try and tune the batch size towards
    an effective size for the available objects.
v3: Start keeping track of shrinker stats in debugfs
v4: Protect against finding no shrinkable objects (div-by-zero)

Signed-off-by: Chris Wilson
Reviewed-by: Joonas Lahtinen
Link: https://patchwork.freedesktop.org/patch/msgid/20171013202621.7276-7-chris@chris-wilson.co.uk
---

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 997fedd..6d64364 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3331,6 +3331,16 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_shrinker_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+
+	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
+	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+
+	return 0;
+}
+
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4811,6 +4821,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_dmc_info", i915_dmc_info, 0},
 	{"i915_display_info", i915_display_info, 0},
 	{"i915_engine_info", i915_engine_info, 0},
+	{"i915_shrinker_info", i915_shrinker_info, 0},
 	{"i915_semaphore_status", i915_semaphore_status, 0},
 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1757651..eb31f8a 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -295,20 +295,39 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_i915_gem_object *obj;
+	unsigned long num_objects = 0;
 	unsigned long count = 0;
 
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link)
-		if (can_release_pages(obj))
+	spin_lock(&i915->mm.obj_lock);
+	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+		if (can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
+			num_objects++;
+		}
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link)
-		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
+	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
-	spin_unlock(&dev_priv->mm.obj_lock);
+			num_objects++;
+		}
+	spin_unlock(&i915->mm.obj_lock);
+
+	/* Update our preferred vmscan batch size for the next pass.
+	 * Our rough guess for an effective batch size is roughly 2
+	 * available GEM objects worth of pages. That is we don't want
+	 * the shrinker to fire, until it is worth the cost of freeing an
+	 * entire GEM object.
+	 */
+	if (num_objects) {
+		unsigned long avg = 2 * count / num_objects;
+
+		i915->mm.shrinker.batch =
+			max((i915->mm.shrinker.batch + avg) >> 1,
+			    128ul /* default SHRINK_BATCH */);
+	}
 
 	return count;
 }
@@ -473,6 +492,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
 	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
 	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+	dev_priv->mm.shrinker.batch = 4096;
 	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
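
For reference, below is a minimal standalone sketch of the v2 feedback heuristic, assuming 4KiB pages. The tune_batch() helper and the object sizes are made up for illustration; only the arithmetic (drift the batch halfway towards two average objects' worth of pages, floored at the default SHRINK_BATCH of 128) mirrors the hunk in i915_gem_shrinker_count() above. The initial batch of 4096 pages is the ~16MiB named in the subject.

/*
 * Standalone model of the shrinker->batch feedback from this patch.
 * tune_batch() and the object sizes are illustrative only; just the
 * arithmetic follows the kernel change.
 */
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4KiB pages, as on x86 */
#define SHRINK_BATCH	128ul	/* vmscan's default batch size */

/* Initial value set by this patch: 4096 pages << PAGE_SHIFT == 16MiB */
static unsigned long batch = 4096;

/*
 * One pass of the feedback: given the total number of shrinkable pages
 * and how many objects they came from, drift the batch halfway towards
 * roughly two average objects' worth of pages.
 */
static void tune_batch(unsigned long pages, unsigned long num_objects)
{
	unsigned long avg;

	if (!num_objects)	/* v4: avoid the div-by-zero */
		return;

	avg = 2 * pages / num_objects;
	batch = (batch + avg) >> 1;
	if (batch < SHRINK_BATCH)
		batch = SHRINK_BATCH;
}

int main(void)
{
	/* Hypothetical shrinkable objects: a few large buffers, in bytes. */
	unsigned long sizes[] = { 8 << 20, 16 << 20, 4 << 20 };
	unsigned long pages = 0;
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		pages += sizes[i] >> PAGE_SHIFT;

	printf("initial batch = %lu pages (%lu MiB)\n",
	       batch, batch >> (20 - PAGE_SHIFT));

	tune_batch(pages, sizeof(sizes) / sizeof(sizes[0]));

	printf("tuned batch   = %lu pages (%lu MiB)\n",
	       batch, batch >> (20 - PAGE_SHIFT));

	return 0;
}

Once the shrinker is registered, the live seeks and batch values can be read back through the i915_shrinker_info debugfs entry added by this patch.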