drm/i915/gem: Protect used framebuffers from casual eviction
authorChris Wilson <chris@chris-wilson.co.uk>
Tue, 19 Jan 2021 21:43:35 +0000 (21:43 +0000)
committerChris Wilson <chris@chris-wilson.co.uk>
Wed, 20 Jan 2021 21:05:12 +0000 (21:05 +0000)
In the shrinker, we protect framebuffers from light reclaim as we
typically expect framebuffers to be reused in the near future (and with
low latency requirements). We can apply the same logic to GGTT
eviction and defer framebuffers to the second pass, evicting them only
if the caller is desperate enough to wait for space to become
available.
In most cases, the caller will use a smaller partial vma instead of
trying to force the object into the GGTT if doing so will cause other
users to be evicted.
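
As a rough illustration of the intended two-pass behaviour, here is a
small user-space sketch (the struct and names below are simplified
stand-ins for illustration only, not the driver's real i915_vma or
eviction scan): vmas that are active or backing a scanout are deferred
on the first pass and only considered for eviction on the second.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for a vma; not the kernel's struct i915_vma. */
struct fake_vma {
	const char *name;
	bool active;	/* still referenced by the GPU */
	bool scanout;	/* currently used as a framebuffer */
	bool evicted;
};

/* Mirrors the patch: active or scanout vmas are not touched on pass 1. */
static bool defer_evict(const struct fake_vma *vma)
{
	return vma->active || vma->scanout;
}

int main(void)
{
	struct fake_vma vmas[] = {
		{ "batch buffer", false, false, false },
		{ "framebuffer",  false, true,  false },
		{ "context",      true,  false, false },
	};
	size_t i;
	int pass;

	/*
	 * Pass 1 evicts only "cheap" vmas; pass 2 (the desperate caller
	 * willing to wait) also takes the deferred ones.
	 */
	for (pass = 1; pass <= 2; pass++) {
		for (i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++) {
			if (vmas[i].evicted)
				continue;
			if (pass == 1 && defer_evict(&vmas[i])) {
				printf("pass 1: defer %s\n", vmas[i].name);
				continue;
			}
			vmas[i].evicted = true;
			printf("pass %d: evict %s\n", pass, vmas[i].name);
		}
	}
	return 0;
}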

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210119214336.1463-5-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/display/intel_frontbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/i915_vma_types.h

diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index d898b37..7b38eee 100644
@@ -225,8 +225,10 @@ static void frontbuffer_release(struct kref *ref)
        struct i915_vma *vma;
 
        spin_lock(&obj->vma.lock);
-       for_each_ggtt_vma(vma, obj)
+       for_each_ggtt_vma(vma, obj) {
+               i915_vma_clear_scanout(vma);
                vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+       }
        spin_unlock(&obj->vma.lock);
 
        RCU_INIT_POINTER(obj->frontbuffer, NULL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index f0379b5..a625731 100644
@@ -416,6 +416,7 @@ retry:
        }
 
        vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+       i915_vma_mark_scanout(vma);
 
        i915_gem_object_flush_if_display_locked(obj);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e1a66c8..4d2d59a 100644
@@ -61,6 +61,17 @@ mark_free(struct drm_mm_scan *scan,
        return drm_mm_scan_add_block(scan, &vma->node);
 }
 
+static bool defer_evict(struct i915_vma *vma)
+{
+       if (i915_vma_is_active(vma))
+               return true;
+
+       if (i915_vma_is_scanout(vma))
+               return true;
+
+       return false;
+}
+
 /**
  * i915_gem_evict_something - Evict vmas to make room for binding a new one
  * @vm: address space to evict from
@@ -150,7 +161,7 @@ search_again:
                 * To notice when we complete one full cycle, we record the
                 * first active element seen, before moving it to the tail.
                 */
-               if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
+               if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) {
                        if (!active)
                                active = vma;
 
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 5b3a3c6..a64adc8 100644
@@ -363,6 +363,21 @@ i915_vma_unpin_fence(struct i915_vma *vma)
 
 void i915_vma_parked(struct intel_gt *gt);
 
+static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
+{
+       return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_mark_scanout(struct i915_vma *vma)
+{
+       set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
+static inline void i915_vma_clear_scanout(struct i915_vma *vma)
+{
+       clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
+}
+
 #define for_each_until(cond) if (cond) break; else
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 9e9082d..f5cb848 100644
@@ -249,6 +249,9 @@ struct i915_vma {
 #define I915_VMA_USERFAULT     ((int)BIT(I915_VMA_USERFAULT_BIT))
 #define I915_VMA_GGTT_WRITE    ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
 
+#define I915_VMA_SCANOUT_BIT   18
+#define I915_VMA_SCANOUT       ((int)BIT(I915_VMA_SCANOUT_BIT))
+
        struct i915_active active;
 
 #define I915_VMA_PAGES_BIAS 24