Simplify the fencing code and differentiate between flushes and waiting types.
Add a "command_stream_barrier" method to the bo driver.

author    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
          Wed, 30 Jan 2008 21:06:02 +0000 (22:06 +0100)
committer Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
          Wed, 30 Jan 2008 21:06:02 +0000 (22:06 +0100)

15 files changed:
linux-core/drm_bo.c
linux-core/drm_fence.c
linux-core/drm_objects.h
linux-core/i915_drv.c
linux-core/i915_fence.c
linux-core/nouveau_buffer.c
linux-core/nouveau_fence.c
linux-core/via_fence.c
linux-core/xgi_drv.c
linux-core/xgi_fence.c
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/via_drv.c
shared-core/via_drv.h
shared-core/via_map.c
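
The reworked interface replaces the driver's single poke_flush() hook with separate flush(), poll(), needed_flush() and wait() hooks, and the core now tracks which fence types are actually being waited on (fc->waiting_types) separately from which flushes are pending (fc->pending_flush). A minimal sketch of the poll() pattern the updated drivers below follow (the example_* names are placeholders, not part of the patch):

    /* Sketch of a driver poll() hook under the new interface. */
    static void example_fence_poll(struct drm_device *dev, uint32_t fence_class,
                                   uint32_t waiting_types)
    {
            struct example_private *dev_priv = dev->dev_private;  /* placeholder type */
            uint32_t sequence;

            if (!waiting_types)
                    return;

            /* Read the sequence number the engine has completed ... */
            sequence = example_read_breadcrumb(dev_priv);          /* placeholder */

            /* ... and let the core signal every fence up to that sequence. */
            drm_fence_handler(dev, fence_class, sequence, DRM_FENCE_TYPE_EXE, 0);
    }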

diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index df10e12..3b180d1 100644
@@ -287,7 +287,7 @@ int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
        DRM_ASSERT_LOCKED(&bo->mutex);
 
        if (bo->fence) {
-               if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
+               if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
@@ -354,7 +354,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
 
        if (bo->fence && drm_fence_object_signaled(bo->fence,
-                                                  bo->fence_type, 0))
+                                                  bo->fence_type))
                drm_fence_usage_deref_unlocked(&bo->fence);
 
        if (bo->fence && remove_all)
@@ -559,7 +559,7 @@ void drm_putback_buffer_objects(struct drm_device *dev)
 
                list_del_init(&entry->lru);
                DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-               DRM_WAKEUP(&entry->event_queue);
+               wake_up_all(&entry->event_queue);
 
                /*
                 * FIXME: Might want to put back on head of list
@@ -660,7 +660,7 @@ int drm_fence_buffer_objects(struct drm_device *dev,
                        entry->fence_type = entry->new_fence_type;
                        DRM_FLAG_MASKED(entry->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
-                       DRM_WAKEUP(&entry->event_queue);
+                       wake_up_all(&entry->event_queue);
                        drm_bo_add_to_lru(entry);
                }
                mutex_unlock(&entry->mutex);
@@ -1032,7 +1032,7 @@ static int drm_bo_quick_busy(struct drm_buffer_object *bo)
 
        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
-               if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+               if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
@@ -1052,12 +1052,12 @@ static int drm_bo_busy(struct drm_buffer_object *bo)
 
        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
-               if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+               if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
                drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
-               if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+               if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
@@ -1249,7 +1249,7 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                if (atomic_add_negative(-1, &bo->mapped))
-                       DRM_WAKEUP(&bo->event_queue);
+                       wake_up_all(&bo->event_queue);
 
        } else
                drm_bo_fill_rep_arg(bo, rep);
@@ -1306,7 +1306,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
        BUG_ON(action != _DRM_REF_TYPE1);
 
        if (atomic_add_negative(-1, &bo->mapped))
-               DRM_WAKEUP(&bo->event_queue);
+               wake_up_all(&bo->event_queue);
 }
 
 /*
@@ -1364,7 +1364,7 @@ out_unlock:
                }
                drm_bo_add_to_lru(bo);
                if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
-                       DRM_WAKEUP(&bo->event_queue);
+                       wake_up_all(&bo->event_queue);
                        DRM_FLAG_MASKED(bo->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                }
@@ -1442,13 +1442,21 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
         * We're switching command submission mechanism,
         * or cannot simply rely on the hardware serializing for us.
         *
-        * Wait for buffer idle.
+        * Insert a driver-dependent barrier or wait for buffer idle.
         */
 
        if ((fence_class != bo->fence_class) ||
            ((ftype ^ bo->fence_type) & bo->fence_type)) {
 
-               ret = drm_bo_wait(bo, 0, 0, no_wait);
+               ret = -EINVAL;
+               if (driver->command_stream_barrier) {
+                       ret = driver->command_stream_barrier(bo,
+                                                            fence_class,
+                                                            ftype,
+                                                            no_wait);
+               }
+               if (ret)
+                       ret = drm_bo_wait(bo, 0, 0, no_wait);
 
                if (ret)
                        return ret;
@@ -1539,7 +1547,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
        } else {
                drm_bo_add_to_lru(bo);
                if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
-                       DRM_WAKEUP(&bo->event_queue);
+                       wake_up_all(&bo->event_queue);
                        DRM_FLAG_MASKED(bo->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                }
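
None of the drivers in this patch implement the new command_stream_barrier hook yet (they all leave it NULL, so the fallback to drm_bo_wait() in the hunk above is what actually runs). Purely as an illustration of the contract documented in drm_objects.h further down, a driver implementation might look roughly like the following sketch; the example_* helpers are placeholders:

    static int example_command_stream_barrier(struct drm_buffer_object *bo,
                                              uint32_t new_fence_class,
                                              uint32_t new_fence_type,
                                              int no_wait)
    {
            /*
             * If emitting the barrier would have to block and the caller
             * asked for a non-blocking operation, report busy.
             */
            if (no_wait && example_ring_full(bo))                  /* placeholder */
                    return -EBUSY;

            /*
             * Emit a command that guarantees the buffer is idle by the time
             * the commands of the current validation start executing.  Any
             * nonzero return makes the core fall back to waiting for idle.
             */
            return example_emit_barrier(bo, new_fence_class, new_fence_type);
    }
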
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 288b4db..a852c63 100644
 
 #include "drmP.h"
 
+
+/*
+ * Convenience function to be called by fence::wait methods that
+ * need polling.
+ */
+
+int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
+                          int interruptible, uint32_t mask, 
+                          unsigned long end_jiffies)
+{
+       struct drm_device *dev = fence->dev;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
+       uint32_t count = 0;
+       int ret;
+
+       DECLARE_WAITQUEUE(entry, current);
+       add_wait_queue(&fc->fence_queue, &entry);
+
+       ret = 0;
+       
+       for (;;) {
+               __set_current_state((interruptible) ? 
+                                   TASK_INTERRUPTIBLE :
+                                   TASK_UNINTERRUPTIBLE);
+               if (drm_fence_object_signaled(fence, mask))
+                       break;
+               if (time_after_eq(jiffies, end_jiffies)) {
+                       ret = -EBUSY;
+                       break;
+               }
+               if (lazy)
+                       schedule_timeout(1);
+               else if ((++count & 0x0F) == 0){
+                       __set_current_state(TASK_RUNNING);
+                       schedule();
+                       __set_current_state((interruptible) ? 
+                                           TASK_INTERRUPTIBLE :
+                                           TASK_UNINTERRUPTIBLE);
+               }                       
+               if (interruptible && signal_pending(current)) {
+                       ret = -EAGAIN;
+                       break;
+               }
+       }
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&fc->fence_queue, &entry);
+       return ret;
+}
+EXPORT_SYMBOL(drm_fence_wait_polling);
+
 /*
  * Typically called by the IRQ handler.
  */
@@ -39,27 +90,14 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
 {
        int wake = 0;
        uint32_t diff;
-       uint32_t relevant;
+       uint32_t relevant_type;
+       uint32_t new_type;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        struct list_head *head;
        struct drm_fence_object *fence, *next;
        int found = 0;
-       int is_exe = (type & DRM_FENCE_TYPE_EXE);
-       int ge_last_exe;
-
-
-       diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
-
-       if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
-               fc->pending_exe_flush = 0;
-
-       diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
-       ge_last_exe = diff < driver->wrap_diff;
-
-       if (is_exe && ge_last_exe)
-               fc->last_exe_flush = sequence;
 
        if (list_empty(&fc->ring))
                return;
@@ -72,7 +110,7 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
                }
        }
 
-       fc->pending_flush &= ~type;
+       fc->waiting_types &= ~type;
        head = (found) ? &fence->ring : &fc->ring;
 
        list_for_each_entry_safe_reverse(fence, next, head, ring) {
@@ -81,64 +119,60 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
 
                if (error) {
                        fence->error = error;
-                       fence->signaled = fence->type;
-                       fence->submitted_flush = fence->type;
-                       fence->flush_mask = fence->type;
+                       fence->signaled_types = fence->type;
                        list_del_init(&fence->ring);
                        wake = 1;
                        break;
                }
 
-               if (is_exe)
-                       type |= fence->native_type;
+               if (type & DRM_FENCE_TYPE_EXE)
+                       type |= fence->native_types;
 
-               relevant = type & fence->type;
+               relevant_type = type & fence->type;
+               new_type = (fence->signaled_types | relevant_type) & 
+                       ~fence->signaled_types;
 
-               if ((fence->signaled | relevant) != fence->signaled) {
-                       fence->signaled |= relevant;
-                       fence->flush_mask |= relevant;
-                       fence->submitted_flush |= relevant;
+               if (new_type) {
+                       fence->signaled_types |= new_type;
                        DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
-                                 fence->base.hash.key, fence->signaled);
-                       wake = 1;
-               }
+                                 fence->base.hash.key, fence->signaled_types);
+
+                       if (driver->needed_flush)
+                               fc->pending_flush |= driver->needed_flush(fence);
 
-               relevant = fence->flush_mask &
-                       ~(fence->submitted_flush | fence->signaled);
+                       if (new_type & fence->waiting_types)
+                               wake = 1;
+               }
 
-               fc->pending_flush |= relevant;
-               fence->submitted_flush |= relevant;
+               fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
 
-               if (!(fence->type & ~fence->signaled)) {
+               if (!(fence->type & ~fence->signaled_types)) {
                        DRM_DEBUG("Fence completely signaled 0x%08lx\n",
                                  fence->base.hash.key);
                        list_del_init(&fence->ring);
                }
-
        }
 
        /*
-        * Reinstate lost flush flags.
+        * Reinstate lost waiting types.
         */
 
-       if ((fc->pending_flush & type) != type) {
+       if ((fc->waiting_types & type) != type) {
                head = head->prev;
                list_for_each_entry(fence, head, ring) {
                        if (&fence->ring == &fc->ring)
                                break;
-                       diff = (fc->last_exe_flush - fence->sequence) &
+                       diff = (fc->highest_waiting_sequence - fence->sequence) &
                                driver->sequence_mask;
                        if (diff > driver->wrap_diff)
                                break;
-
-                       relevant = fence->submitted_flush & ~fence->signaled;
-                       fc->pending_flush |= relevant;
+                       
+                       fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
                }
        }
 
-       if (wake) {
-               DRM_WAKEUP(&fc->fence_queue);
-       }
+       if (wake) 
+               wake_up_all(&fc->fence_queue);
 }
 EXPORT_SYMBOL(drm_fence_handler);
 
@@ -219,41 +253,28 @@ static void drm_fence_object_destroy(struct drm_file *priv,
        drm_fence_usage_deref_locked(&fence);
 }
 
-int drm_fence_object_signaled(struct drm_fence_object *fence,
-                             uint32_t mask, int poke_flush)
+int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
 {
        unsigned long flags;
        int signaled;
        struct drm_device *dev = fence->dev;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_driver *driver = dev->driver->fence_driver;
-
-       if (poke_flush)
-               driver->poke_flush(dev, fence->fence_class);
+       
+       mask &= fence->type;
        read_lock_irqsave(&fm->lock, flags);
-       signaled =
-           (fence->type & mask & fence->signaled) == (fence->type & mask);
+       signaled = (mask & fence->signaled_types) == mask;
        read_unlock_irqrestore(&fm->lock, flags);
-
+       if (!signaled && driver->poll) {
+               write_lock_irqsave(&fm->lock, flags);
+               driver->poll(dev, fence->fence_class, mask);
+               signaled = (mask & fence->signaled_types) == mask;
+               write_unlock_irqrestore(&fm->lock, flags);
+       }
        return signaled;
 }
 EXPORT_SYMBOL(drm_fence_object_signaled);
 
-static void drm_fence_flush_exe(struct drm_fence_class_manager *fc,
-                               struct drm_fence_driver *driver,
-                               uint32_t sequence)
-{
-       uint32_t diff;
-
-       if (!fc->pending_exe_flush) {
-               fc->exe_flush_sequence = sequence;
-               fc->pending_exe_flush = 1;
-       } else {
-               diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
-               if (diff < driver->wrap_diff)
-                       fc->exe_flush_sequence = sequence;
-       }
-}
 
 int drm_fence_object_flush(struct drm_fence_object *fence,
                           uint32_t type)
@@ -262,7 +283,10 @@ int drm_fence_object_flush(struct drm_fence_object *fence,
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
        struct drm_fence_driver *driver = dev->driver->fence_driver;
-       unsigned long flags;
+       unsigned long irq_flags;
+       uint32_t saved_pending_flush;
+       uint32_t diff;
+       int call_flush;
 
        if (type & ~fence->type) {
                DRM_ERROR("Flush trying to extend fence type, "
@@ -270,24 +294,36 @@ int drm_fence_object_flush(struct drm_fence_object *fence,
                return -EINVAL;
        }
 
-       write_lock_irqsave(&fm->lock, flags);
-       fence->flush_mask |= type;
-       if ((fence->submitted_flush & fence->signaled)
-           == fence->submitted_flush) {
-               if ((fence->type & DRM_FENCE_TYPE_EXE) &&
-                   !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
-                       drm_fence_flush_exe(fc, driver, fence->sequence);
-                       fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
-               } else {
-                       fc->pending_flush |= (fence->flush_mask &
-                                             ~fence->submitted_flush);
-                       fence->submitted_flush = fence->flush_mask;
-               }
-       }
-       write_unlock_irqrestore(&fm->lock, flags);
-       driver->poke_flush(dev, fence->fence_class);
+       write_lock_irqsave(&fm->lock, irq_flags);
+       fence->waiting_types |= type;
+       fc->waiting_types |= fence->waiting_types;
+       diff = (fence->sequence - fc->highest_waiting_sequence) & 
+               driver->sequence_mask;
+
+       if (diff < driver->wrap_diff)
+               fc->highest_waiting_sequence = fence->sequence;
+
+       /*
+        * fence->waiting_types has changed. Determine whether
+        * we need to initiate some kind of flush as a result of this.
+        */
+
+       saved_pending_flush = fc->pending_flush;
+       if (driver->needed_flush) 
+               fc->pending_flush |= driver->needed_flush(fence);
+
+       if (driver->poll)
+               driver->poll(dev, fence->fence_class, fence->waiting_types);
+
+       call_flush = fc->pending_flush;
+       write_unlock_irqrestore(&fm->lock, irq_flags);
+
+       if (call_flush && driver->flush)
+               driver->flush(dev, fence->fence_class);
+
        return 0;
 }
+EXPORT_SYMBOL(drm_fence_object_flush);
 
 /*
  * Make sure old fence objects are signaled before their fence sequences are
@@ -299,90 +335,52 @@ void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
 {
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
-       struct drm_fence_driver *driver = dev->driver->fence_driver;
-       uint32_t old_sequence;
-       unsigned long flags;
        struct drm_fence_object *fence;
+       unsigned long irq_flags;
+       struct drm_fence_driver *driver = dev->driver->fence_driver;
+       int call_flush;
+
        uint32_t diff;
 
-       write_lock_irqsave(&fm->lock, flags);
-       old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
-       diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
+       write_lock_irqsave(&fm->lock, irq_flags);
 
-       if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
-               fc->pending_exe_flush = 1;
-               fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
-       }
-       write_unlock_irqrestore(&fm->lock, flags);
+       list_for_each_entry_reverse(fence, &fc->ring, ring) {
+               diff = (sequence - fence->sequence) & driver->sequence_mask;
+               if (diff <= driver->flush_diff)
+                       break;
+       
+               fence->waiting_types = fence->type;
+               fc->waiting_types |= fence->type;
 
-       mutex_lock(&dev->struct_mutex);
-       read_lock_irqsave(&fm->lock, flags);
+               if (driver->needed_flush)
+                       fc->pending_flush |= driver->needed_flush(fence);
+       }       
+       
+       if (driver->poll)
+               driver->poll(dev, fence_class, fc->waiting_types);
 
-       if (list_empty(&fc->ring)) {
-               read_unlock_irqrestore(&fm->lock, flags);
-               mutex_unlock(&dev->struct_mutex);
-               return;
-       }
-       fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring));
-       mutex_unlock(&dev->struct_mutex);
-       diff = (old_sequence - fence->sequence) & driver->sequence_mask;
-       read_unlock_irqrestore(&fm->lock, flags);
-       if (diff < driver->wrap_diff)
-               drm_fence_object_flush(fence, fence->type);
-       drm_fence_usage_deref_unlocked(&fence);
-}
-EXPORT_SYMBOL(drm_fence_flush_old);
+       call_flush = fc->pending_flush;
+       write_unlock_irqrestore(&fm->lock, irq_flags);
 
-static int drm_fence_lazy_wait(struct drm_fence_object *fence,
-                              int ignore_signals,
-                              uint32_t mask)
-{
-       struct drm_device *dev = fence->dev;
-       struct drm_fence_manager *fm = &dev->fm;
-       struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
-       int signaled;
-       unsigned long _end = jiffies + 3*DRM_HZ;
-       int ret = 0;
+       if (call_flush && driver->flush)
+               driver->flush(dev, fence->fence_class);
+
+       /*
+        * FIXME: Should we implement a wait here for really old fences?
+        */
 
-       do {
-               DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
-                           (signaled = drm_fence_object_signaled(fence, mask, 1)));
-               if (signaled)
-                       return 0;
-               if (time_after_eq(jiffies, _end))
-                       break;
-       } while (ret == -EINTR && ignore_signals);
-       if (drm_fence_object_signaled(fence, mask, 0))
-               return 0;
-       if (time_after_eq(jiffies, _end))
-               ret = -EBUSY;
-       if (ret) {
-               if (ret == -EBUSY) {
-                       DRM_ERROR("Fence timeout. "
-                                 "GPU lockup or fence driver was "
-                                 "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n",
-                                 fence->fence_class,
-                                 fence->sequence,
-                                 fence->type,
-                                 mask,
-                                 fence->signaled);
-                       DRM_ERROR("Pending exe flush %d 0x%08x\n",
-                                 fc->pending_exe_flush,
-                                 fc->exe_flush_sequence);
-               }
-               return ((ret == -EINTR) ? -EAGAIN : ret);
-       }
-       return 0;
 }
+EXPORT_SYMBOL(drm_fence_flush_old);
 
 int drm_fence_object_wait(struct drm_fence_object *fence,
                          int lazy, int ignore_signals, uint32_t mask)
 {
        struct drm_device *dev = fence->dev;
        struct drm_fence_driver *driver = dev->driver->fence_driver;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
        int ret = 0;
-       unsigned long _end;
-       int signaled;
+       unsigned long _end = 3 * DRM_HZ;
 
        if (mask & ~fence->type) {
                DRM_ERROR("Wait trying to extend fence type"
@@ -391,58 +389,39 @@ int drm_fence_object_wait(struct drm_fence_object *fence,
                return -EINVAL;
        }
 
-       if (drm_fence_object_signaled(fence, mask, 0))
-               return 0;
+       if (driver->wait)
+               return driver->wait(fence, lazy, !ignore_signals, mask);
 
-       _end = jiffies + 3 * DRM_HZ;
 
        drm_fence_object_flush(fence, mask);
+       if (driver->has_irq(dev, fence->fence_class, mask)) {
+               if (!ignore_signals)
+                       ret = wait_event_interruptible_timeout
+                               (fc->fence_queue, 
+                                drm_fence_object_signaled(fence, mask), 
+                                3 * DRM_HZ);
+               else 
+                       ret = wait_event_timeout
+                               (fc->fence_queue, 
+                                drm_fence_object_signaled(fence, mask), 
+                                3 * DRM_HZ);
+
+               if (unlikely(ret == -ERESTARTSYS))
+                       return -EAGAIN;
+
+               if (unlikely(ret == 0))
+                       return -EBUSY;
 
-       if (lazy && driver->lazy_capable) {
-
-               ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
-               if (ret)
-                       return ret;
-
-       } else {
-
-               if (driver->has_irq(dev, fence->fence_class,
-                                   DRM_FENCE_TYPE_EXE)) {
-                       ret = drm_fence_lazy_wait(fence, ignore_signals,
-                                                 DRM_FENCE_TYPE_EXE);
-                       if (ret)
-                               return ret;
-               }
-
-               if (driver->has_irq(dev, fence->fence_class,
-                                   mask & ~DRM_FENCE_TYPE_EXE)) {
-                       ret = drm_fence_lazy_wait(fence, ignore_signals,
-                                                 mask);
-                       if (ret)
-                               return ret;
-               }
-       }
-       if (drm_fence_object_signaled(fence, mask, 0))
                return 0;
+       }
 
-       /*
-        * Avoid kernel-space busy-waits.
-        */
-       if (!ignore_signals)
-               return -EAGAIN;
-
-       do {
-               schedule();
-               signaled = drm_fence_object_signaled(fence, mask, 1);
-       } while (!signaled && !time_after_eq(jiffies, _end));
-
-       if (!signaled)
-               return -EBUSY;
-
-       return 0;
+       return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
+                                     _end);
 }
 EXPORT_SYMBOL(drm_fence_object_wait);
 
+
+
 int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
                          uint32_t fence_class, uint32_t type)
 {
@@ -452,25 +431,24 @@ int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
        struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
        unsigned long flags;
        uint32_t sequence;
-       uint32_t native_type;
+       uint32_t native_types;
        int ret;
 
        drm_fence_unring(dev, &fence->ring);
        ret = driver->emit(dev, fence_class, fence_flags, &sequence,
-                          &native_type);
+                          &native_types);
        if (ret)
                return ret;
 
        write_lock_irqsave(&fm->lock, flags);
        fence->fence_class = fence_class;
        fence->type = type;
-       fence->flush_mask = 0x00;
-       fence->submitted_flush = 0x00;
-       fence->signaled = 0x00;
+       fence->waiting_types = 0;
+       fence->signaled_types = 0;
        fence->sequence = sequence;
-       fence->native_type = native_type;
+       fence->native_types = native_types;
        if (list_empty(&fc->ring))
-               fc->last_exe_flush = sequence - 1;
+               fc->highest_waiting_sequence = sequence - 1;
        list_add_tail(&fence->ring, &fc->ring);
        write_unlock_irqrestore(&fm->lock, flags);
        return 0;
@@ -500,9 +478,8 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
        INIT_LIST_HEAD(&fence->base.list);
        fence->fence_class = fence_class;
        fence->type = type;
-       fence->flush_mask = 0;
-       fence->submitted_flush = 0;
-       fence->signaled = 0;
+       fence->signaled_types = 0;
+       fence->waiting_types = 0;
        fence->sequence = 0;
        fence->dev = dev;
        write_unlock_irqrestore(&fm->lock, flags);
@@ -598,7 +575,7 @@ void drm_fence_fill_arg(struct drm_fence_object *fence,
        arg->handle = fence->base.hash.key;
        arg->fence_class = fence->fence_class;
        arg->type = fence->type;
-       arg->signaled = fence->signaled;
+       arg->signaled = fence->signaled_types;
        arg->error = fence->error;
        arg->sequence = fence->sequence;
        read_unlock_irqrestore(&fm->lock, irq_flags);
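
The sequence comparisons above (and in drm_fence_flush_old() and drm_fence_handler()) all rely on the same wrap-safe idiom: subtract modulo the driver's sequence_mask and compare the difference against wrap_diff. Written out as a helper (a sketch, not something this patch adds):

    /*
     * Nonzero if sequence 'a' was emitted at or after sequence 'b',
     * taking wraparound of the hardware sequence counter into account.
     * This is the test drm_fence_object_flush() uses before updating
     * fc->highest_waiting_sequence.
     */
    static int example_seq_after_eq(uint32_t a, uint32_t b,
                                    const struct drm_fence_driver *driver)
    {
            uint32_t diff = (a - b) & driver->sequence_mask;

            return diff < driver->wrap_diff;
    }
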
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index a2d10b5..c35d900 100644
@@ -147,12 +147,11 @@ struct drm_fence_object {
 
        struct list_head ring;
        int fence_class;
-       uint32_t native_type;
+       uint32_t native_types;
        uint32_t type;
-       uint32_t signaled;
+       uint32_t signaled_types;
        uint32_t sequence;
-       uint32_t flush_mask;
-       uint32_t submitted_flush;
+       uint32_t waiting_types;
        uint32_t error;
 };
 
@@ -162,10 +161,9 @@ struct drm_fence_object {
 struct drm_fence_class_manager {
        struct list_head ring;
        uint32_t pending_flush;
+       uint32_t waiting_types;
        wait_queue_head_t fence_queue;
-       int pending_exe_flush;
-       uint32_t last_exe_flush;
-       uint32_t exe_flush_sequence;
+       uint32_t highest_waiting_sequence;
 };
 
 struct drm_fence_manager {
@@ -177,19 +175,49 @@ struct drm_fence_manager {
 };
 
 struct drm_fence_driver {
+       unsigned long *waiting_jiffies;
        uint32_t num_classes;
        uint32_t wrap_diff;
        uint32_t flush_diff;
        uint32_t sequence_mask;
-       int lazy_capable;
+
+       /*
+        * Driver implemented functions:
+        * has_irq() : 1 if the hardware can update the indicated type_flags using an
+        * irq handler. 0 if polling is required.
+        *
+        * emit() : Emit a sequence number to the command stream.
+        * Return the sequence number.
+        *
+        * flush() : Make sure the flags indicated in fc->pending_flush will eventually
+        * signal for fc->highest_received_sequence and all preceding sequences.
+        * Acknowledge by clearing the flags fc->pending_flush.
+        *
+        * poll() : Call drm_fence_handler with any new information.
+        *
+        * needed_flush() : Given the current state of the fence->type flags and previously
+        * executed or queued flushes, return the type_flags that need flushing.
+        *
+        * wait(): Wait for the "mask" flags to signal on a given fence, performing
+        * whatever's necessary to make this happen.
+        */
+
        int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
                        uint32_t flags);
        int (*emit) (struct drm_device *dev, uint32_t fence_class,
                     uint32_t flags, uint32_t *breadcrumb,
                     uint32_t *native_type);
-       void (*poke_flush) (struct drm_device *dev, uint32_t fence_class);
+       void (*flush) (struct drm_device *dev, uint32_t fence_class);
+       void (*poll) (struct drm_device *dev, uint32_t fence_class,
+               uint32_t types);
+       uint32_t (*needed_flush) (struct drm_fence_object *fence);
+       int (*wait) (struct drm_fence_object *fence, int lazy,
+                    int interruptible, uint32_t mask);
 };
 
+extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
+                                 int interruptible, uint32_t mask,
+                                 unsigned long end_jiffies);
 extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
                              uint32_t sequence, uint32_t type,
                              uint32_t error);
@@ -200,7 +228,7 @@ extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
 extern int drm_fence_object_flush(struct drm_fence_object *fence,
                                  uint32_t type);
 extern int drm_fence_object_signaled(struct drm_fence_object *fence,
-                                    uint32_t type, int flush);
+                                    uint32_t type);
 extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
 extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
 extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
@@ -576,6 +604,33 @@ struct drm_bo_driver {
         * ttm_cache_flush
         */
        void (*ttm_cache_flush)(struct drm_ttm *ttm);
+
+       /*
+        * command_stream_barrier
+        *
+        * @dev: The drm device.
+        *
+        * @bo: The buffer object to validate.
+        *
+        * @new_fence_class: The new fence class for the buffer object.
+        *
+        * @new_fence_type: The new fence type for the buffer object.
+        *
+        * @no_wait: whether this should give up and return -EBUSY
+        * if this operation would require sleeping
+        *
+        * Insert a command stream barrier that makes sure that the
+        * buffer is idle once the commands associated with the
+        * current validation are starting to execute. If an error
+        * condition is returned, or the function pointer is NULL,
+        * the drm core will force buffer idle
+        * during validation.
+        */
+
+       int (*command_stream_barrier) (struct drm_buffer_object *bo,
+                                      uint32_t new_fence_class,
+                                      uint32_t new_fence_type,
+                                      int no_wait);                                   
 };
 
 /*
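
As a simplified illustration of the needed_flush() contract described above, a driver whose non-EXE fence types require an explicit flush could return the waiting, not-yet-signaled types once the EXE part has signaled. This is only a sketch; the i915 version below is the real reference and additionally accounts for native types and an already-pending flush:

    static uint32_t example_fence_needed_flush(struct drm_fence_object *fence)
    {
            /*
             * Types somebody is waiting for, minus EXE (signaled by the
             * breadcrumb irq) and minus what has already signaled.
             */
            uint32_t pending = fence->waiting_types &
                    ~(DRM_FENCE_TYPE_EXE | fence->signaled_types);

            /*
             * Only request a flush once the commands covered by this fence
             * have actually executed.
             */
            if (!(fence->signaled_types & DRM_FENCE_TYPE_EXE))
                    return 0;

            return pending;
    }
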
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 5f2e6ad..ccc061d 100644
@@ -39,17 +39,9 @@ static struct pci_device_id pciidlist[] = {
 };
 
 #ifdef I915_HAVE_FENCE
-static struct drm_fence_driver i915_fence_driver = {
-       .num_classes = 1,
-       .wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
-       .flush_diff = (1U << (BREADCRUMB_BITS - 2)),
-       .sequence_mask = BREADCRUMB_MASK,
-       .lazy_capable = 1,
-       .emit = i915_fence_emit_sequence,
-       .poke_flush = i915_poke_flush,
-       .has_irq = i915_fence_has_irq,
-};
+extern struct drm_fence_driver i915_fence_driver;
 #endif
+
 #ifdef I915_HAVE_BUFFER
 
 static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
@@ -67,6 +59,7 @@ static struct drm_bo_driver i915_bo_driver = {
        .evict_flags = i915_evict_flags,
        .move = i915_move,
        .ttm_cache_flush = i915_flush_ttm,
+       .command_stream_barrier = NULL,
 };
 #endif
 
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index e3c76df..8a2e7f1 100644
 #include "i915_drv.h"
 
 /*
- * Implements an intel sync flush operation.
+ * Initiate a sync flush if it's not already pending.
  */
 
-static void i915_perform_flush(struct drm_device *dev)
+static void i915_initiate_rwflush(struct drm_i915_private *dev_priv, 
+                                 struct drm_fence_class_manager *fc)
+{
+       if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) && 
+           !dev_priv->flush_pending) {
+               dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
+               dev_priv->flush_flags = fc->pending_flush;
+               dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
+               I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
+               dev_priv->flush_pending = 1;
+               fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
+       }
+}
+
+static void i915_fence_flush(struct drm_device *dev,
+                            uint32_t fence_class)
+{
+       struct drm_i915_private *dev_priv = 
+               (struct drm_i915_private *) dev->dev_private;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[0];
+       unsigned long irq_flags;
+
+       if (unlikely(!dev_priv))
+               return;
+
+       write_lock_irqsave(&fm->lock, irq_flags);
+       i915_initiate_rwflush(dev_priv, fc);
+       write_unlock_irqrestore(&fm->lock, irq_flags);
+}
+
+static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
+                           uint32_t waiting_types)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[0];
-       struct drm_fence_driver *driver = dev->driver->fence_driver;
        uint32_t flush_flags = 0;
        uint32_t flush_sequence = 0;
        uint32_t i_status;
-       uint32_t diff;
        uint32_t sequence;
-       int rwflush;
 
-       if (!dev_priv)
+       if (unlikely(!dev_priv))
                return;
 
-       if (fc->pending_exe_flush) {
-               sequence = READ_BREADCRUMB(dev_priv);
+       /*
+        * First, report any executed sync flush:
+        */
+
+       if (dev_priv->flush_pending) {
+               i_status = READ_HWSP(dev_priv, 0);
+               if ((i_status & (1 << 12)) !=
+                   (dev_priv->saved_flush_status & (1 << 12))) {
+                       flush_flags = dev_priv->flush_flags;
+                       flush_sequence = dev_priv->flush_sequence;
+                       dev_priv->flush_pending = 0;
+                       drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
+               }
+       }               
 
-               /*
-                * First update fences with the current breadcrumb.
-                */
+       /*
+        * Report A new breadcrumb, and adjust IRQs.
+        */
+
+       if (waiting_types & DRM_FENCE_TYPE_EXE) {
+               sequence = READ_BREADCRUMB(dev_priv);
 
-               diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
-               if (diff < driver->wrap_diff && diff != 0) {
-                       drm_fence_handler(dev, 0, sequence,
+               if (sequence != dev_priv->reported_sequence ||
+                   !dev_priv->reported_sequence_valid) {
+                       drm_fence_handler(dev, 0, sequence, 
                                          DRM_FENCE_TYPE_EXE, 0);
+                       dev_priv->reported_sequence = sequence;
+                       dev_priv->reported_sequence_valid = 1;
                }
 
-               if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
+               if (dev_priv->fence_irq_on && !(waiting_types & DRM_FENCE_TYPE_EXE)) {
                        i915_user_irq_off(dev_priv);
                        dev_priv->fence_irq_on = 0;
-               } else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
+               } else if (!dev_priv->fence_irq_on && (waiting_types & DRM_FENCE_TYPE_EXE)) {
                        i915_user_irq_on(dev_priv);
                        dev_priv->fence_irq_on = 1;
                }
        }
 
-       if (dev_priv->flush_pending) {
-               i_status = READ_HWSP(dev_priv, 0);
-               if ((i_status & (1 << 12)) !=
-                   (dev_priv->saved_flush_status & (1 << 12))) {
-                       flush_flags = dev_priv->flush_flags;
-                       flush_sequence = dev_priv->flush_sequence;
-                       dev_priv->flush_pending = 0;
-                       drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
-               }
-       }
+       /*
+        * There may be new RW flushes pending. Start them.
+        */
+       
+       i915_initiate_rwflush(dev_priv, fc); 
 
-       rwflush = fc->pending_flush & DRM_I915_FENCE_TYPE_RW;
-       if (rwflush && !dev_priv->flush_pending) {
-               dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
-               dev_priv->flush_flags = fc->pending_flush;
-               dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
-               I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
-               dev_priv->flush_pending = 1;
-               fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
-       }
+       /*
+        * And possibly, but unlikely, they finish immediately. 
+        */
 
        if (dev_priv->flush_pending) {
                i_status = READ_HWSP(dev_priv, 0);
-               if ((i_status & (1 << 12)) !=
-                   (dev_priv->saved_flush_status & (1 << 12))) {
+               if (unlikely((i_status & (1 << 12)) !=
+                   (dev_priv->saved_flush_status & (1 << 12)))) {
                        flush_flags = dev_priv->flush_flags;
                        flush_sequence = dev_priv->flush_sequence;
                        dev_priv->flush_pending = 0;
                        drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
                }
        }
-
 }
 
-void i915_poke_flush(struct drm_device *dev, uint32_t class)
-{
-       struct drm_fence_manager *fm = &dev->fm;
-       unsigned long flags;
-
-       write_lock_irqsave(&fm->lock, flags);
-       i915_perform_flush(dev);
-       write_unlock_irqrestore(&fm->lock, flags);
-}
-
-int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
+static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
                             uint32_t flags, uint32_t *sequence,
                             uint32_t *native_type)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       if (!dev_priv)
+       if (unlikely(!dev_priv))
                return -EINVAL;
 
        i915_emit_irq(dev);
@@ -140,20 +164,130 @@ int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
 void i915_fence_handler(struct drm_device *dev)
 {
        struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[0];
 
        write_lock(&fm->lock);
-       i915_perform_flush(dev);
+       i915_fence_poll(dev, 0, fc->waiting_types);
        write_unlock(&fm->lock);
 }
 
-int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
+/*
+ * We need a separate wait function since we need to poll for
+ * sync flushes.
+ */
+
+static int i915_fence_wait(struct drm_fence_object *fence,
+                          int lazy, int interruptible, uint32_t mask)
 {
+       struct drm_device *dev = fence->dev;
+       drm_i915_private_t *dev_priv = (struct drm_i915_private *) dev->dev_private;
+       struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[0];
+       int ret;
+       unsigned long  _end = jiffies + 3 * DRM_HZ;
+
+       drm_fence_object_flush(fence, mask);
+       if (likely(interruptible))
+               ret = wait_event_interruptible_timeout
+                       (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), 
+                        3 * DRM_HZ);
+       else 
+               ret = wait_event_timeout
+                       (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), 
+                        3 * DRM_HZ);
+
+       if (unlikely(ret == -ERESTARTSYS))
+               return -EAGAIN;
+
+       if (unlikely(ret == 0))
+               return -EBUSY;
+
+       if (likely(mask == DRM_FENCE_TYPE_EXE || 
+                  drm_fence_object_signaled(fence, mask))) 
+               return 0;
+
        /*
-        * We have an irq that tells us when we have a new breadcrumb.
+        * Remove this code snippet when fixed. HWSTAM doesn't let
+        * flush info through...
         */
 
-       if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
-               return 1;
+       if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
+               unsigned long irq_flags;
 
-       return 0;
+               DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
+               msleep(100);
+               dev_priv->flush_pending = 0;
+               write_lock_irqsave(&fm->lock, irq_flags);
+               drm_fence_handler(dev, fence->fence_class, 
+                                 fence->sequence, fence->type, 0);
+               write_unlock_irqrestore(&fm->lock, irq_flags);
+       }
+
+       /*
+        * Poll for sync flush completion.
+        */
+
+       return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end);
+}
+
+static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence)
+{
+       uint32_t flush_flags = fence->waiting_types & 
+               ~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
+
+       if (likely(flush_flags == 0 || 
+                  ((flush_flags & ~fence->native_types) == 0) || 
+                  (fence->signaled_types != DRM_FENCE_TYPE_EXE)))
+               return 0;
+       else {
+               struct drm_device *dev = fence->dev;
+               struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+               struct drm_fence_driver *driver = dev->driver->fence_driver;
+               
+               if (unlikely(!dev_priv))
+                       return 0;
+
+               if (dev_priv->flush_pending) {
+                       uint32_t diff = (dev_priv->flush_sequence - fence->sequence) & 
+                               driver->sequence_mask;
+
+                       if (diff < driver->wrap_diff)
+                               return 0;
+               }
+       }
+       return flush_flags;
+}
+
+/*
+ * In the very unlikely event that "poll" is not really called very often
+ * we need the following function to handle sequence wraparounds.
+ */
+
+void i915_invalidate_reported_sequence(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = (struct drm_i915_private *) 
+               dev->dev_private;
+       struct drm_fence_manager *fm = &dev->fm;
+       unsigned long irq_flags;
+
+       if (unlikely(!dev_priv))
+               return;
+       
+       write_lock_irqsave(&fm->lock, irq_flags);
+       dev_priv->reported_sequence_valid = 0;
+       write_unlock_irqrestore(&fm->lock, irq_flags);
 }
+       
+
+struct drm_fence_driver i915_fence_driver = {
+       .num_classes = 1,
+       .wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
+       .flush_diff = (1U << (BREADCRUMB_BITS - 2)),
+       .sequence_mask = BREADCRUMB_MASK,
+       .has_irq = NULL,
+       .emit = i915_fence_emit_sequence,
+       .flush = i915_fence_flush,
+       .poll = i915_fence_poll,
+       .needed_flush = i915_fence_needed_flush,
+       .wait = i915_fence_wait,
+};
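
i915_fence_wait() above is also the model for how the new drm_fence_wait_polling() helper is meant to be used from a driver wait() hook: initiate any needed flush, sleep on the fence queue while the irq can make progress, then poll for the remaining types. Stripped down to the polling fallback (a sketch with the irq wait omitted):

    static int example_fence_wait(struct drm_fence_object *fence,
                                  int lazy, int interruptible, uint32_t mask)
    {
            unsigned long end = jiffies + 3 * DRM_HZ;

            /* Make sure a flush covering 'mask' has been initiated. */
            drm_fence_object_flush(fence, mask);

            /* Types that no irq reports are picked up by polling. */
            return drm_fence_wait_polling(fence, lazy, interruptible, mask, end);
    }
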
diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c
index a652bb1..1154931 100644
@@ -294,5 +294,6 @@ struct drm_bo_driver nouveau_bo_driver = {
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
-       .ttm_cache_flush= nouveau_bo_flush_ttm
+       .ttm_cache_flush= nouveau_bo_flush_ttm,
+       .command_stream_barrier = NULL
 };
diff --git a/linux-core/nouveau_fence.c b/linux-core/nouveau_fence.c
index 4e624a7..59dcf7d 100644
@@ -75,7 +75,7 @@ nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags,
 }
 
 static void
-nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class)
+nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_types)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
@@ -83,42 +83,26 @@ nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class)
        uint32_t pending_types = 0;
 
        DRM_DEBUG("class=%d\n", class);
-
-       pending_types = fc->pending_flush |
-                       ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
-       DRM_DEBUG("pending: 0x%08x 0x%08x\n", pending_types,
-                                             fc->pending_flush);
+       DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types);
 
        if (pending_types) {
                uint32_t sequence = NV_READ(chan->ref_cnt);
 
                DRM_DEBUG("got 0x%08x\n", sequence);
-               drm_fence_handler(dev, class, sequence, pending_types, 0);
+               drm_fence_handler(dev, class, sequence, waiting_types, 0);
        }
 }
 
-static void
-nouveau_fence_poke_flush(struct drm_device *dev, uint32_t class)
-{
-       struct drm_fence_manager *fm = &dev->fm;
-       unsigned long flags;
-
-       DRM_DEBUG("class=%d\n", class);
-
-       write_lock_irqsave(&fm->lock, flags);
-       nouveau_fence_perform_flush(dev, class);
-       write_unlock_irqrestore(&fm->lock, flags);
-}
-
 void
 nouveau_fence_handler(struct drm_device *dev, int channel)
 {
        struct drm_fence_manager *fm = &dev->fm;
+       struct drm_fence_class_manager *fc = &fm->fence_class[channel];
 
        DRM_DEBUG("class=%d\n", channel);
 
        write_lock(&fm->lock);
-       nouveau_fence_perform_flush(dev, channel);
+       nouveau_fence_poll(dev, channel, fc->waiting_types);
        write_unlock(&fm->lock);
 }
 
@@ -127,8 +111,10 @@ struct drm_fence_driver nouveau_fence_driver = {
        .wrap_diff      = (1 << 30),
        .flush_diff     = (1 << 29),
        .sequence_mask  = 0xffffffffU,
-       .lazy_capable   = 1,
        .has_irq        = nouveau_fence_has_irq,
        .emit           = nouveau_fence_emit,
-       .poke_flush     = nouveau_fence_poke_flush
+       .flush          = NULL,
+       .poll           = nouveau_fence_poll,
+       .needed_flush   = NULL,
+       .wait           = NULL
 };
diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c
index 9af1bf3..3a680a3 100644
  * DRM_VIA_FENCE_TYPE_ACCEL guarantees that all 2D & 3D rendering is complete.
  */
 
-
-static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
+static void via_fence_poll(struct drm_device *dev, uint32_t class,
+                          uint32_t waiting_types)
 {
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-       struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
-       uint32_t pending_flush_types = 0;
        uint32_t signaled_flush_types = 0;
        uint32_t status;
 
        if (class != 0)
-               return 0;
+               return;
 
-       if (!dev_priv)
-               return 0;
+       if (unlikely(!dev_priv))
+               return;
 
        spin_lock(&dev_priv->fence_lock);
-
-       pending_flush_types = fc->pending_flush |
-               ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
-
-       if (pending_flush_types) {
+       if (waiting_types) {
 
                /*
                 * Take the idlelock. This guarantees that the next time a client tries
@@ -77,7 +71,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
                 * Check if AGP command reader is idle.
                 */
 
-               if (pending_flush_types & DRM_FENCE_TYPE_EXE)
+               if (waiting_types & DRM_FENCE_TYPE_EXE)
                        if (VIA_READ(0x41C) & 0x80000000)
                                signaled_flush_types |= DRM_FENCE_TYPE_EXE;
 
@@ -85,7 +79,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
                 * Check VRAM command queue empty and 2D + 3D engines idle.
                 */
 
-               if (pending_flush_types & DRM_VIA_FENCE_TYPE_ACCEL) {
+               if (waiting_types & DRM_VIA_FENCE_TYPE_ACCEL) {
                        status = VIA_READ(VIA_REG_STATUS);
                        if ((status & VIA_VR_QUEUE_BUSY) &&
                            !(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)))
@@ -93,8 +87,8 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
                }
 
                if (signaled_flush_types) {
-                       pending_flush_types &= ~signaled_flush_types;
-                       if (!pending_flush_types && dev_priv->have_idlelock) {
+                       waiting_types &= ~signaled_flush_types;
+                       if (!waiting_types && dev_priv->have_idlelock) {
                                drm_idlelock_release(&dev->lock);
                                dev_priv->have_idlelock = 0;
                        }
@@ -105,8 +99,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
 
        spin_unlock(&dev_priv->fence_lock);
 
-       return fc->pending_flush |
-               ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+       return;
 }
 
 
@@ -114,8 +107,8 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
  * Emit a fence sequence.
  */
 
-int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
-                            uint32_t * sequence, uint32_t * native_type)
+static int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
+                                  uint32_t * sequence, uint32_t * native_type)
 {
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        int ret = 0;
@@ -150,36 +143,6 @@ int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t fl
 }
 
 /**
- * Manual poll (from the fence manager).
- */
-
-void via_poke_flush(struct drm_device * dev, uint32_t class)
-{
-       drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-       struct drm_fence_manager *fm = &dev->fm;
-       unsigned long flags;
-       uint32_t pending_flush;
-
-       if (!dev_priv)
-               return ;
-
-       write_lock_irqsave(&fm->lock, flags);
-       pending_flush = via_perform_flush(dev, class);
-       if (pending_flush)
-               pending_flush = via_perform_flush(dev, class);
-       write_unlock_irqrestore(&fm->lock, flags);
-
-       /*
-        * Kick the timer if there are more flushes pending.
-        */
-
-       if (pending_flush && !timer_pending(&dev_priv->fence_timer)) {
-               dev_priv->fence_timer.expires = jiffies + 1;
-               add_timer(&dev_priv->fence_timer);
-       }
-}
-
-/**
  * No irq fence expirations implemented yet.
  * Although both the HQV engines and PCI dmablit engines signal
  * idle with an IRQ, we haven't implemented this yet.
@@ -187,45 +150,20 @@ void via_poke_flush(struct drm_device * dev, uint32_t class)
  * unless the caller wanting to wait for a fence object has indicated a lazy wait.
  */
 
-int via_fence_has_irq(struct drm_device * dev, uint32_t class,
-                     uint32_t flags)
+static int via_fence_has_irq(struct drm_device * dev, uint32_t class,
+                            uint32_t flags)
 {
        return 0;
 }
 
-/**
- * Regularly call the flush function. This enables lazy waits, so we can
- * set lazy_capable. Lazy waits don't really care when the fence expires,
- * so a timer tick delay should be fine.
- */
-
-void via_fence_timer(unsigned long data)
-{
-       struct drm_device *dev = (struct drm_device *) data;
-       drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-       struct drm_fence_manager *fm = &dev->fm;
-       uint32_t pending_flush;
-       struct drm_fence_class_manager *fc = &dev->fm.fence_class[0];
-
-       if (!dev_priv)
-               return;
-       if (!fm->initialized)
-               goto out_unlock;
-
-       via_poke_flush(dev, 0);
-       pending_flush = fc->pending_flush |
-               ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
-
-       /*
-        * disable timer if there are no more flushes pending.
-        */
-
-       if (!pending_flush && timer_pending(&dev_priv->fence_timer)) {
-               BUG_ON(dev_priv->have_idlelock);
-               del_timer(&dev_priv->fence_timer);
-       }
-       return;
-out_unlock:
-       return;
-
-}
+struct drm_fence_driver via_fence_driver = {
+       .num_classes = 1,
+       .wrap_diff = (1 << 30),
+       .flush_diff = (1 << 20),
+       .sequence_mask = 0xffffffffU,
+       .has_irq = via_fence_has_irq,
+       .emit = via_fence_emit_sequence,
+       .poll = via_fence_poll,
+       .needed_flush = NULL,
+       .wait = NULL
+};
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c
index 4f0b4ed..f0225f8 100644
@@ -37,16 +37,7 @@ static struct pci_device_id pciidlist[] = {
        xgi_PCI_IDS
 };
 
-static struct drm_fence_driver xgi_fence_driver = {
-       .num_classes = 1,
-       .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK,
-       .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1,
-       .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK,
-       .lazy_capable = 1,
-       .emit = xgi_fence_emit_sequence,
-       .poke_flush = xgi_poke_flush,
-       .has_irq = xgi_fence_has_irq
-};
+extern struct drm_fence_driver xgi_fence_driver;
 
 int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
 
diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c
index 9a75581..63ed29e 100644
 #include "xgi_misc.h"
 #include "xgi_cmdlist.h"
 
-static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
+static void xgi_fence_poll(struct drm_device * dev, uint32_t class, 
+                          uint32_t waiting_types)
 {
        struct xgi_info * info = dev->dev_private;
-       struct drm_fence_class_manager * fc = &dev->fm.fence_class[class];
-       uint32_t pending_flush_types = 0;
-       uint32_t signaled_flush_types = 0;
+       uint32_t signaled_types = 0;
 
 
        if ((info == NULL) || (class != 0))
-               return 0;
+               return;
 
        DRM_SPINLOCK(&info->fence_lock);
 
-       pending_flush_types = fc->pending_flush |
-               ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
-
-       if (pending_flush_types) {
-               if (pending_flush_types & DRM_FENCE_TYPE_EXE) {
+       if (waiting_types) {
+               if (waiting_types & DRM_FENCE_TYPE_EXE) {
                        const u32 begin_id = le32_to_cpu(DRM_READ32(info->mmio_map,
                                                        0x2820))
                                & BEGIN_BEGIN_IDENTIFICATION_MASK;
 
                        if (begin_id != info->complete_sequence) {
                                info->complete_sequence = begin_id;
-                               signaled_flush_types |= DRM_FENCE_TYPE_EXE;
+                               signaled_types |= DRM_FENCE_TYPE_EXE;
                        }
                }
 
-               if (signaled_flush_types) {
+               if (signaled_types) {
                        drm_fence_handler(dev, 0, info->complete_sequence,
-                                         signaled_flush_types, 0);
+                                         signaled_types, 0);
                }
        }
 
        DRM_SPINUNLOCK(&info->fence_lock);
-
-       return fc->pending_flush |
-               ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
 }
 
 
@@ -98,25 +91,13 @@ int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
 }
 
 
-void xgi_poke_flush(struct drm_device * dev, uint32_t class)
-{
-       struct drm_fence_manager * fm = &dev->fm;
-       unsigned long flags;
-
-
-       write_lock_irqsave(&fm->lock, flags);
-       xgi_do_flush(dev, class);
-       write_unlock_irqrestore(&fm->lock, flags);
-}
-
-
 void xgi_fence_handler(struct drm_device * dev)
 {
        struct drm_fence_manager * fm = &dev->fm;
-
+       struct drm_fence_class_manager *fc = &fm->fence_class[0];
 
        write_lock(&fm->lock);
-       xgi_do_flush(dev, 0);
+       xgi_fence_poll(dev, 0, fc->waiting_types);
        write_unlock(&fm->lock);
 }
 
@@ -125,3 +106,17 @@ int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
 {
        return ((class == 0) && (flags == DRM_FENCE_TYPE_EXE)) ? 1 : 0;
 }
+
+struct drm_fence_driver xgi_fence_driver = {
+       .num_classes = 1,
+       .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK,
+       .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1,
+       .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK,
+       .has_irq = xgi_fence_has_irq,
+       .emit = xgi_fence_emit_sequence,
+       .flush = NULL,
+       .poll = xgi_fence_poll,
+       .needed_flush = NULL,
+       .wait = NULL
+};
+
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 287e95a..15fb811 100644
@@ -422,6 +422,9 @@ void i915_emit_breadcrumb(struct drm_device *dev)
        RING_LOCALS;
 
        if (++dev_priv->counter > BREADCRUMB_MASK) {
+#ifdef I915_HAVE_FENCE
+               i915_invalidate_reported_sequence(dev);
+#endif
                 dev_priv->counter = 1;
                 DRM_DEBUG("Breadcrumb counter wrapped around\n");
        }
@@ -1113,7 +1116,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
                        fence_arg->handle = fence->base.hash.key;
                        fence_arg->fence_class = fence->fence_class;
                        fence_arg->type = fence->type;
-                       fence_arg->signaled = fence->signaled;
+                       fence_arg->signaled = fence->signaled_types;
                }
        }
        drm_fence_usage_deref_unlocked(&fence);
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index c92758f..3f6c806 100644
@@ -136,6 +136,8 @@ typedef struct drm_i915_private {
        uint32_t flush_flags;
        uint32_t flush_pending;
        uint32_t saved_flush_status;
+       uint32_t reported_sequence;
+       int reported_sequence_valid;
 #endif
 #ifdef I915_HAVE_BUFFER
        void *agp_iomap;
@@ -290,15 +292,9 @@ extern void i915_mem_release(struct drm_device * dev,
                             struct mem_block *heap);
 #ifdef I915_HAVE_FENCE
 /* i915_fence.c */
-
-
 extern void i915_fence_handler(struct drm_device *dev);
-extern int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
-                                   uint32_t flags,
-                                   uint32_t *sequence,
-                                   uint32_t *native_type);
-extern void i915_poke_flush(struct drm_device *dev, uint32_t class);
-extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags);
+extern void i915_invalidate_reported_sequence(struct drm_device *dev);
+
 #endif
 
 #ifdef I915_HAVE_BUFFER
diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c
index a802e4a..dd632c3 100644
@@ -40,17 +40,9 @@ static struct pci_device_id pciidlist[] = {
 
 
 #ifdef VIA_HAVE_FENCE
-static struct drm_fence_driver via_fence_driver = {
-       .num_classes = 1,
-       .wrap_diff = (1 << 30),
-       .flush_diff = (1 << 20),
-       .sequence_mask = 0xffffffffU,
-       .lazy_capable = 1,
-       .emit = via_fence_emit_sequence,
-       .poke_flush = via_poke_flush,
-       .has_irq = via_fence_has_irq,
-};
+extern struct drm_fence_driver via_fence_driver;
 #endif
+
 #ifdef VIA_HAVE_BUFFER
 
 /**
@@ -76,6 +68,8 @@ static struct drm_bo_driver via_bo_driver = {
        .init_mem_type = via_init_mem_type,
        .evict_flags = via_evict_flags,
        .move = NULL,
+       .ttm_cache_flush = NULL,
+       .command_stream_barrier = NULL
 };
 #endif
 
diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h
index 8dd4a72..941a2d7 100644
@@ -196,17 +196,6 @@ extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq
 extern void via_init_dmablit(struct drm_device *dev);
 #endif
 
-#ifdef VIA_HAVE_FENCE
-extern void via_fence_timer(unsigned long data);
-extern void via_poke_flush(struct drm_device * dev, uint32_t class);
-extern int via_fence_emit_sequence(struct drm_device * dev, uint32_t class,
-                                  uint32_t flags,
-                                  uint32_t * sequence,
-                                  uint32_t * native_type);
-extern int via_fence_has_irq(struct drm_device * dev, uint32_t class,
-                            uint32_t flags);
-#endif
-
 #ifdef VIA_HAVE_BUFFER
 extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
 extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
diff --git a/shared-core/via_map.c b/shared-core/via_map.c
index 11bfa55..5493436 100644
@@ -69,9 +69,6 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
        dev_priv->emit_0_sequence = 0;
        dev_priv->have_idlelock = 0;
        spin_lock_init(&dev_priv->fence_lock);
-       init_timer(&dev_priv->fence_timer);
-       dev_priv->fence_timer.function = &via_fence_timer;
-       dev_priv->fence_timer.data = (unsigned long) dev;
 #endif /* VIA_HAVE_FENCE */
        dev->dev_private = (void *)dev_priv;
 #ifdef VIA_HAVE_BUFFER