drm/i915: Wait for completion of pending flips when starved of fences
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2be904c..00c8361 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1465,6 +1465,22 @@ out:
        return ret;
 }
 
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct i915_vma *vma;
+
+       /*
+        * Only the global gtt is relevant for gtt memory mappings, so restrict
+        * list traversal to objects bound into the global address space. Note
+        * that the active list should be empty, but better safe than sorry.
+        */
+       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
+       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
+               i915_gem_release_mmap(vma->obj);
+       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
+               i915_gem_release_mmap(vma->obj);
+}
+
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
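
Note on the hunk above: i915_gem_release_all_mmaps() tears down every userspace GTT mmap in one pass by walking the global GTT's active and inactive vma lists. A minimal sketch of the kind of call site such a helper is aimed at, assuming a suspend-style path where the aperture is about to become inaccessible; the function below is illustrative and not part of this diff:

        /* Illustrative only: drop all userspace GTT mmaps so that the next
         * CPU access through the aperture faults back into the driver
         * instead of touching hardware that is about to be powered down. */
        static int example_gtt_quiesce(struct drm_device *dev)
        {
                struct drm_i915_private *dev_priv = dev->dev_private;

                i915_gem_release_all_mmaps(dev_priv);
                return 0;
        }
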
@@ -2314,7 +2330,7 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 
        if (ring->hangcheck.action != HANGCHECK_WAIT &&
            i915_request_guilty(request, acthd, &inside)) {
-               DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
+               DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
                          ring->name,
                          inside ? "inside" : "flushing",
                          offset,
@@ -2354,28 +2370,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
        kfree(request);
 }
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
-                                     struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+                                      struct intel_ring_buffer *ring)
 {
-       u32 completed_seqno;
-       u32 acthd;
-
-       acthd = intel_ring_get_active_head(ring);
-       completed_seqno = ring->get_seqno(ring, false);
-
-       while (!list_empty(&ring->request_list)) {
-               struct drm_i915_gem_request *request;
-
-               request = list_first_entry(&ring->request_list,
-                                          struct drm_i915_gem_request,
-                                          list);
+       u32 completed_seqno = ring->get_seqno(ring, false);
+       u32 acthd = intel_ring_get_active_head(ring);
+       struct drm_i915_gem_request *request;
 
-               if (request->seqno > completed_seqno)
-                       i915_set_reset_status(ring, request, acthd);
+       list_for_each_entry(request, &ring->request_list, list) {
+               if (i915_seqno_passed(completed_seqno, request->seqno))
+                       continue;
 
-               i915_gem_free_request(request);
+               i915_set_reset_status(ring, request, acthd);
        }
+}
 
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+                                       struct intel_ring_buffer *ring)
+{
        while (!list_empty(&ring->active_list)) {
                struct drm_i915_gem_object *obj;
 
@@ -2385,6 +2397,23 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 
                i915_gem_object_move_to_inactive(obj);
        }
+
+       /*
+        * We must free the requests after all the corresponding objects have
+        * been moved off the active lists, which is the same order in which the
+        * normal retire_requests path operates. This is important if objects
+        * hold implicit references on things like e.g. ppgtt address spaces
+        * through the request.
+        */
+       while (!list_empty(&ring->request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&ring->request_list,
+                                          struct drm_i915_gem_request,
+                                          list);
+
+               i915_gem_free_request(request);
+       }
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2414,8 +2443,16 @@ void i915_gem_reset(struct drm_device *dev)
        struct intel_ring_buffer *ring;
        int i;
 
+       /*
+        * Before we free the objects from the requests, we need to inspect
+        * them to find the guilty party. As the requests only borrow their
+        * reference to the objects, the inspection must be done first.
+        */
        for_each_ring(ring, dev_priv, i)
-               i915_gem_reset_ring_lists(dev_priv, ring);
+               i915_gem_reset_ring_status(dev_priv, ring);
+
+       for_each_ring(ring, dev_priv, i)
+               i915_gem_reset_ring_cleanup(dev_priv, ring);
 
        i915_gem_cleanup_ringbuffer(dev);
 
@@ -3069,7 +3106,7 @@ i915_find_fence_reg(struct drm_device *dev)
        }
 
        if (avail == NULL)
-               return NULL;
+               goto deadlock;
 
        /* None available, try to steal one or wait for a user to finish */
        list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
@@ -3079,7 +3116,12 @@ i915_find_fence_reg(struct drm_device *dev)
                return reg;
        }
 
-       return NULL;
+deadlock:
+       /* Wait for completion of pending flips which consume fences */
+       if (intel_has_pending_fb_unpin(dev))
+               return ERR_PTR(-EAGAIN);
+
+       return ERR_PTR(-EDEADLK);
 }
 
 /**
@@ -3124,8 +3166,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
                }
        } else if (enable) {
                reg = i915_find_fence_reg(dev);
-               if (reg == NULL)
-                       return -EDEADLK;
+               if (IS_ERR(reg))
+                       return PTR_ERR(reg);
 
                if (reg->obj) {
                        struct drm_i915_gem_object *old = reg->obj;
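
Note on the last two hunks: i915_find_fence_reg() now reports why it failed through an ERR_PTR-encoded pointer rather than a bare NULL: -EAGAIN when pending page flips are temporarily pinning fences (intel_has_pending_fb_unpin()) and one will be released shortly, -EDEADLK only when the object set genuinely cannot be fenced. The practical effect is that a starved submission can simply be retried. A sketch of the userspace side, modelled on libdrm's drmIoctl() restart policy; the helper name and the choice of execbuffer ioctl are illustrative, assuming the usual uapi headers:

        #include <errno.h>
        #include <sys/ioctl.h>
        #include <drm/i915_drm.h>

        /* Retry the submission while the kernel reports a transient
         * condition, which now includes "all fences are consumed by
         * scanouts still awaiting their flip". */
        static int submit_execbuf(int fd, struct drm_i915_gem_execbuffer2 *eb)
        {
                int ret;

                do {
                        ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, eb);
                } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

                return ret == -1 ? -errno : 0;
        }
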