drm/i915: move wedged to the other gpu error handling stuff
author	Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 14 Nov 2012 16:14:05 +0000 (17:14 +0100)
committer	Daniel Vetter <daniel.vetter@ffwll.ch>
Sun, 20 Jan 2013 12:11:15 +0000 (13:11 +0100)
And to make Ben Widawsky happier, use the gpu_error instead of
the entire device as the argument in some functions.

Drop the outdated comment on ->wedged for now; a follow-up patch will
change the semantics and add a proper comment again.

Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
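
For context: after this patch all hang/reset bookkeeping lives in one
place. Condensed from the i915_drv.h hunks below, the error state ends
up roughly as follows (a sketch only, with unrelated members elided):

	struct i915_gpu_error {
		/* ... */
		struct completion completion;	/* completed when a reset finishes */

		unsigned long last_reset;

		atomic_t wedged;	/* moved here from struct i915_gem_mm */

		/* For gpu hang simulation. */
		unsigned int stop_rings;
	};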
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_ringbuffer.c

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3b1bf4e..e1b7eaf 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1672,7 +1672,7 @@ i915_wedged_read(struct file *filp,
 
        len = snprintf(buf, sizeof(buf),
                       "wedged :  %d\n",
-                      atomic_read(&dev_priv->mm.wedged));
+                      atomic_read(&dev_priv->gpu_error.wedged));
 
        if (len > sizeof(buf))
                len = sizeof(buf);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dfe0e74..62da6c7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -744,15 +744,6 @@ struct i915_gem_mm {
         */
        int suspended;
 
-       /**
-        * Flag if the hardware appears to be wedged.
-        *
-        * This is set when attempts to idle the device timeout.
-        * It prevents command submission from occurring and makes
-        * every pending request fail
-        */
-       atomic_t wedged;
-
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
@@ -784,6 +775,8 @@ struct i915_gpu_error {
 
        unsigned long last_reset;
 
+       atomic_t wedged;
+
        /* For gpu hang simulation. */
        unsigned int stop_rings;
 };
@@ -1548,7 +1541,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
 
 void i915_gem_reset(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95e022e..04b2f92 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -87,14 +87,13 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 }
 
 static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct completion *x = &dev_priv->gpu_error.completion;
+       struct completion *x = &error->completion;
        unsigned long flags;
        int ret;
 
-       if (!atomic_read(&dev_priv->mm.wedged))
+       if (!atomic_read(&error->wedged))
                return 0;
 
        /*
@@ -110,7 +109,7 @@ i915_gem_wait_for_error(struct drm_device *dev)
                return ret;
        }
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
+       if (atomic_read(&error->wedged)) {
                /* GPU is hung, bump the completion count to account for
                 * the token we just consumed so that we never hit zero and
                 * end up waiting upon a subsequent completion event that
@@ -125,9 +124,10 @@ i915_gem_wait_for_error(struct drm_device *dev)
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ret = i915_gem_wait_for_error(dev);
+       ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;
 
@@ -939,11 +939,11 @@ unlock:
 }
 
 int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
                     bool interruptible)
 {
-       if (atomic_read(&dev_priv->mm.wedged)) {
-               struct completion *x = &dev_priv->gpu_error.completion;
+       if (atomic_read(&error->wedged)) {
+               struct completion *x = &error->completion;
                bool recovery_complete;
                unsigned long flags;
 
@@ -1025,7 +1025,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 
 #define EXIT_COND \
        (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-       atomic_read(&dev_priv->mm.wedged))
+       atomic_read(&dev_priv->gpu_error.wedged))
        do {
                if (interruptible)
                        end = wait_event_interruptible_timeout(ring->irq_queue,
@@ -1035,7 +1035,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        end = wait_event_timeout(ring->irq_queue, EXIT_COND,
                                                 timeout_jiffies);
 
-               ret = i915_gem_check_wedge(dev_priv, interruptible);
+               ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
                if (ret)
                        end = ret;
        } while (end == 0 && wait_forever);
@@ -1081,7 +1081,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(seqno == 0);
 
-       ret = i915_gem_check_wedge(dev_priv, interruptible);
+       ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
        if (ret)
                return ret;
 
@@ -1146,7 +1146,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        if (seqno == 0)
                return 0;
 
-       ret = i915_gem_check_wedge(dev_priv, true);
+       ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
        if (ret)
                return ret;
 
@@ -1379,7 +1379,7 @@ out:
                /* If this -EIO is due to a gpu hang, give the reset code a
                 * chance to clean up the mess. Otherwise return the proper
                 * SIGBUS. */
-               if (!atomic_read(&dev_priv->mm.wedged))
+               if (!atomic_read(&dev_priv->gpu_error.wedged))
                        return VM_FAULT_SIGBUS;
        case -EAGAIN:
                /* Give the error handler a chance to run and move the
@@ -3390,7 +3390,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        u32 seqno = 0;
        int ret;
 
-       if (atomic_read(&dev_priv->mm.wedged))
+       if (atomic_read(&dev_priv->gpu_error.wedged))
                return -EIO;
 
        spin_lock(&file_priv->mm.lock);
@@ -3978,9 +3978,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
+       if (atomic_read(&dev_priv->gpu_error.wedged)) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
-               atomic_set(&dev_priv->mm.wedged, 0);
+               atomic_set(&dev_priv->gpu_error.wedged, 0);
        }
 
        mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c768ebd..f2c0016 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -871,11 +871,11 @@ static void i915_error_work_func(struct work_struct *work)
 
        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
-       if (atomic_read(&dev_priv->mm.wedged)) {
+       if (atomic_read(&dev_priv->gpu_error.wedged)) {
                DRM_DEBUG_DRIVER("resetting chip\n");
                kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
                if (!i915_reset(dev)) {
-                       atomic_set(&dev_priv->mm.wedged, 0);
+                       atomic_set(&dev_priv->gpu_error.wedged, 0);
                        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
                }
                complete_all(&dev_priv->gpu_error.completion);
@@ -1483,7 +1483,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 
        if (wedged) {
                INIT_COMPLETION(dev_priv->gpu_error.completion);
-               atomic_set(&dev_priv->mm.wedged, 1);
+               atomic_set(&dev_priv->gpu_error.wedged, 1);
 
                /*
                 * Wakeup waiting processes so they don't hang
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b35902e..160aa5f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2223,7 +2223,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
        WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
 
        wait_event(dev_priv->pending_flip_queue,
-                  atomic_read(&dev_priv->mm.wedged) ||
+                  atomic_read(&dev_priv->gpu_error.wedged) ||
                   atomic_read(&obj->pending_flip) == 0);
 
        /* Big Hammer, we also need to ensure that any pending
@@ -2871,7 +2871,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
        unsigned long flags;
        bool pending;
 
-       if (atomic_read(&dev_priv->mm.wedged))
+       if (atomic_read(&dev_priv->gpu_error.wedged))
                return false;
 
        spin_lock_irqsave(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d6b06aa..9438bcd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1371,7 +1371,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 
                msleep(1);
 
-               ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+               ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+                                          dev_priv->mm.interruptible);
                if (ret)
                        return ret;
        } while (!time_after(jiffies, end));
@@ -1460,7 +1461,8 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        int ret;
 
-       ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+       ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+                                  dev_priv->mm.interruptible);
        if (ret)
                return ret;
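
As a usage note, the narrowed signature means submission paths only need
the error state to decide whether to proceed, not the whole
device-private struct. A minimal hypothetical caller (illustration only,
not part of the patch; the function name is made up) would look like:

	static int example_begin_submission(struct drm_i915_private *dev_priv)
	{
		int ret;

		/* Bail if the GPU is wedged or a reset is still pending. */
		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			return ret;

		/* ... emit commands to the ring ... */
		return 0;
	}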