void (*put_pages)(struct drm_i915_gem_object *obj,
struct sg_table *pages);
int (*truncate)(struct drm_i915_gem_object *obj);
- int (*shrinker_release_pages)(struct drm_i915_gem_object *obj,
- bool no_gpu_wait,
- bool should_writeback);
+ /**
+ * shrink - Perform further backend-specific actions to facilitate
+ * shrinking.
+ * @obj: The gem object
+ * @flags: Extra flags to control shrinking behaviour in the backend
+ *
+ * Possible values for @flags:
+ *
+ * I915_GEM_OBJECT_SHRINK_WRITEBACK - Try to perform writeback of the
+ * backing pages, if supported.
+ *
+ * I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT - Don't wait for the object to
+ * idle. Active objects can be considered later. The TTM backend, for
+ * example, might have async migrations in flight which don't use any
+ * i915_vma to track the active GTT binding, and hence an object being
+ * unbound is not enough to guarantee it is idle.
+ */
+#define I915_GEM_OBJECT_SHRINK_WRITEBACK BIT(0)
+#define I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT BIT(1)
+ int (*shrink)(struct drm_i915_gem_object *obj, unsigned int flags);
int (*pread)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *arg);
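The hunk above only declares the new hook. For reference, a minimal sketch of a backend implementation honouring both flags might look as follows; backend_object_is_idle() and backend_writeback() are hypothetical placeholders, not functions in this patch or in i915:

static int example_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	/*
	 * Hypothetical helper: a real backend would consult its own
	 * state (dma-resv fences, pending async moves, ...) here.
	 */
	if (flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT &&
	    !backend_object_is_idle(obj))
		return -EBUSY;

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		backend_writeback(obj); /* hypothetical: best-effort swap-out */

	return 0;
}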
__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
}
-static int shmem_shrinker_release_pages(struct drm_i915_gem_object *obj,
- bool no_gpu_wait,
- bool writeback)
+static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
return 0;
}
- if (writeback)
+ if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
shmem_writeback(obj);
return 0;
.get_pages = shmem_get_pages,
.put_pages = shmem_put_pages,
.truncate = shmem_truncate,
- .shrinker_release_pages = shmem_shrinker_release_pages,
+ .shrink = shmem_shrink,
.pwrite = shmem_pwrite,
.pread = shmem_pread,
static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
- if (obj->ops->shrinker_release_pages)
- return obj->ops->shrinker_release_pages(obj,
- !(flags & I915_SHRINK_ACTIVE),
- flags & I915_SHRINK_WRITEBACK);
+ if (obj->ops->shrink) {
+ unsigned int shrink_flags = 0;
+
+ if (!(flags & I915_SHRINK_ACTIVE))
+ shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;
+
+ if (flags & I915_SHRINK_WRITEBACK)
+ shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;
+
+ return obj->ops->shrink(obj, shrink_flags);
+ }
+
return 0;
}
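With this translation in place, shrinker call sites keep passing the existing I915_SHRINK_* mask unchanged. As a hedged sketch (the I915_SHRINK_* values are the flags already used by the i915 shrinker; the call site itself is illustrative, not part of this patch):

/*
 * Illustrative call: I915_SHRINK_ACTIVE is absent and
 * I915_SHRINK_WRITEBACK is set, so try_to_writeback() invokes
 * obj->ops->shrink(obj, I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT |
 *		    I915_GEM_OBJECT_SHRINK_WRITEBACK).
 */
unsigned int shrink_mask = I915_SHRINK_BOUND | I915_SHRINK_UNBOUND |
			   I915_SHRINK_WRITEBACK;
int err = try_to_writeback(obj, shrink_mask);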
return 0;
}
-static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
- bool no_wait_gpu,
- bool should_writeback)
+static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct i915_ttm_tt *i915_tt =
container_of(bo->ttm, typeof(*i915_tt), ttm);
struct ttm_operation_ctx ctx = {
.interruptible = true,
- .no_wait_gpu = no_wait_gpu,
+ .no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT,
};
struct ttm_placement place = {};
int ret;
return ret;
}
- if (should_writeback)
+ if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
__shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
return 0;
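One subtlety in the hunk above: ttm_operation_ctx.no_wait_gpu is a bool, while the masked expression assigned to it is an unsigned int. No !! is needed because C's conversion to _Bool yields 1 for any nonzero value. A small standalone demonstration (plain userspace C, with the kernel's BIT(1) expanded by hand):

#include <stdbool.h>
#include <stdio.h>

#define I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT (1u << 1)

int main(void)
{
	unsigned int flags = I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;
	bool no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;

	/* Prints 1: conversion to _Bool normalizes any nonzero value. */
	printf("%d\n", (int)no_wait_gpu);
	return 0;
}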
.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
.truncate = i915_ttm_purge,
- .shrinker_release_pages = i915_ttm_shrinker_release_pages,
+ .shrink = i915_ttm_shrink,
.adjust_lru = i915_ttm_adjust_lru,
.delayed_free = i915_ttm_delayed_free,