drm/i915: clean up shrinker_release_pages
authorMatthew Auld <matthew.auld@intel.com>
Wed, 15 Dec 2021 11:07:46 +0000 (11:07 +0000)
committerMatthew Auld <matthew.auld@intel.com>
Mon, 10 Jan 2022 10:49:50 +0000 (10:49 +0000)
Add some proper flags for the different modes, and shorten the name to
something more snappy.

Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211215110746.865-2-matthew.auld@intel.com
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c

index 00c844c..6f446cc 100644 (file)
@@ -57,9 +57,26 @@ struct drm_i915_gem_object_ops {
        void (*put_pages)(struct drm_i915_gem_object *obj,
                          struct sg_table *pages);
        int (*truncate)(struct drm_i915_gem_object *obj);
-       int (*shrinker_release_pages)(struct drm_i915_gem_object *obj,
-                                     bool no_gpu_wait,
-                                     bool should_writeback);
+       /**
+        * shrink - Perform further backend specific actions to facilitate
+        * shrinking.
+        * @obj: The gem object
+        * @flags: Extra flags to control shrinking behaviour in the backend
+        *
+        * Possible values for @flags:
+        *
+        * I915_GEM_OBJECT_SHRINK_WRITEBACK - Try to perform writeback of the
+        * backing pages, if supported.
+        *
+        * I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT - Don't wait for the object to
+        * idle.  Active objects can be considered later. The TTM backend for
+        * example might have async migrations going on, which don't use any
+        * i915_vma to track the active GTT binding, and hence having an unbound
+        * object might not be enough.
+        */
+#define I915_GEM_OBJECT_SHRINK_WRITEBACK   BIT(0)
+#define I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT BIT(1)
+       int (*shrink)(struct drm_i915_gem_object *obj, unsigned int flags);
 
        int (*pread)(struct drm_i915_gem_object *obj,
                     const struct drm_i915_gem_pread *arg);
index 7fdf4fa..6c57b0a 100644 (file)
@@ -331,9 +331,7 @@ shmem_writeback(struct drm_i915_gem_object *obj)
        __shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
 }
 
-static int shmem_shrinker_release_pages(struct drm_i915_gem_object *obj,
-                                       bool no_gpu_wait,
-                                       bool writeback)
+static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
 {
        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
@@ -342,7 +340,7 @@ static int shmem_shrinker_release_pages(struct drm_i915_gem_object *obj,
                return 0;
        }
 
-       if (writeback)
+       if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
                shmem_writeback(obj);
 
        return 0;
@@ -520,7 +518,7 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
        .get_pages = shmem_get_pages,
        .put_pages = shmem_put_pages,
        .truncate = shmem_truncate,
-       .shrinker_release_pages = shmem_shrinker_release_pages,
+       .shrink = shmem_shrink,
 
        .pwrite = shmem_pwrite,
        .pread = shmem_pread,
index 60b35e4..6a6ff98 100644 (file)
@@ -57,10 +57,18 @@ static int drop_pages(struct drm_i915_gem_object *obj,
 
 static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
 {
-       if (obj->ops->shrinker_release_pages)
-               return obj->ops->shrinker_release_pages(obj,
-                                                       !(flags & I915_SHRINK_ACTIVE),
-                                                       flags & I915_SHRINK_WRITEBACK);
+       if (obj->ops->shrink) {
+               unsigned int shrink_flags = 0;
+
+               if (!(flags & I915_SHRINK_ACTIVE))
+                       shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;
+
+               if (flags & I915_SHRINK_WRITEBACK)
+                       shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;
+
+               return obj->ops->shrink(obj, shrink_flags);
+       }
+
        return 0;
 }
 
index 923cc7a..21277f3 100644 (file)
@@ -424,16 +424,14 @@ int i915_ttm_purge(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
-                                          bool no_wait_gpu,
-                                          bool should_writeback)
+static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
 {
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct i915_ttm_tt *i915_tt =
                container_of(bo->ttm, typeof(*i915_tt), ttm);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
-               .no_wait_gpu = no_wait_gpu,
+               .no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT,
        };
        struct ttm_placement place = {};
        int ret;
@@ -467,7 +465,7 @@ static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
                return ret;
        }
 
-       if (should_writeback)
+       if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
                __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
 
        return 0;
@@ -953,7 +951,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
        .get_pages = i915_ttm_get_pages,
        .put_pages = i915_ttm_put_pages,
        .truncate = i915_ttm_purge,
-       .shrinker_release_pages = i915_ttm_shrinker_release_pages,
+       .shrink = i915_ttm_shrink,
 
        .adjust_lru = i915_ttm_adjust_lru,
        .delayed_free = i915_ttm_delayed_free,