Validation and fencing.
author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Thu, 31 Aug 2006 19:42:29 +0000 (21:42 +0200)
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Thu, 31 Aug 2006 19:42:29 +0000 (21:42 +0200)
linux-core/drmP.h
linux-core/drm_bo.c
linux-core/i915_buffer.c
linux-core/i915_drv.c
linux-core/i915_fence.c
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drv.h
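
In short: validated buffers are now parked on bm->unfenced with _DRM_BO_FLAG_UNFENCED set in bo->priv_flags, and drm_fence_buffer_objects() later attaches a single fence object to the whole list (allocating one if the caller passes NULL) before moving each buffer back onto its LRU list. Below is a rough, illustrative sketch of the validate -> submit -> fence flow this patch is built around (see the comment above drm_bo_wait_unfenced() in the diff); example_exec() and the command-submission step are placeholders for driver-specific code, and locking/error handling are omitted:

    /*
     * Illustrative only: how a driver's execbuf-style path would be
     * expected to use the new interface.
     */
    static int example_exec(drm_file_t *priv)
    {
            drm_device_t *dev = priv->head->dev;
            drm_buffer_manager_t *bm = &dev->bm;

            /* 1) Validate the buffers referenced by the command stream.
             *    Each one ends up on bm->unfenced with
             *    _DRM_BO_FLAG_UNFENCED set in bo->priv_flags.          */

            /* 2) Emit the commands that reference those buffers
             *    (driver specific, e.g. ring emission on i915).        */

            /* 3) Fence everything on the unfenced list with one fence
             *    object. Passing NULL lets drm_fence_buffer_objects()
             *    allocate a fence covering the union of the buffers'
             *    fence_flags; each buffer then returns to its LRU list
             *    and waiters on bo->event_queue are woken.             */
            return drm_fence_buffer_objects(priv, &bm->unfenced, NULL);
    }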

diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 01e3c66..5242b28 100644
@@ -656,6 +656,8 @@ typedef struct drm_bo_driver{
         int cached_vram;
        drm_ttm_backend_t *(*create_ttm_backend_entry) 
                (struct drm_device *dev, int cached);
+       int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
+       int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
 } drm_bo_driver_t;
 
 
@@ -783,7 +785,9 @@ typedef struct drm_buffer_manager{
        struct list_head vram_lru;
        struct list_head unfenced;
        struct list_head ddestroy;
+        struct list_head other;
         struct timer_list timer;
+        uint32_t fence_flags;
 } drm_buffer_manager_t;
 
 
@@ -975,12 +979,15 @@ typedef struct drm_buffer_object{
        struct list_head ddestroy;
 
        uint32_t fence_flags;
+        uint32_t fence_class;
        drm_fence_object_t *fence;
-       int unfenced;
-       wait_queue_head_t validate_queue;
+        uint32_t priv_flags;
+       wait_queue_head_t event_queue;
         struct mutex mutex;
 } drm_buffer_object_t;
 
+#define _DRM_BO_FLAG_UNFENCED 0x00000001
+#define _DRM_BO_FLAG_EVICTED  0x00000002
 
 
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
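
The two new drm_bo_driver hooks added above are the per-driver knobs this patch relies on; the real i915 implementations (i915_fence_types() and i915_invalidate_caches()) appear further down in this patch, where they are plugged into i915_bo_driver. A minimal, hypothetical driver could provide them as follows — the class/type values here are made up purely for illustration:

    /* Hypothetical single-engine driver: one fence class, and every
     * buffer only ever needs the plain execution fence type.           */
    static int example_fence_type(uint32_t flags, uint32_t *class, uint32_t *type)
    {
            *class = 0;
            *type = 1;              /* execution fence only */
            return 0;
    }

    /* Called when a previously evicted buffer is bound again and stale
     * read caches may need flushing; this example driver has none.     */
    static int example_invalidate_caches(struct drm_device *dev, uint32_t flags)
    {
            return 0;
    }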
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index e251326..8bca2e3 100644
 (_old) ^= (((_old) ^ (_new)) & (_mask)); \
 }
 
-
-int drm_fence_buffer_objects(drm_file_t * priv)
-{
-       drm_device_t *dev = priv->head->dev;
-       drm_buffer_manager_t *bm = &dev->bm;
-
-       drm_buffer_object_t *entry, *next;
-       uint32_t fence_flags = 0;
-       int count = 0;
-       drm_fence_object_t *fence;
-       int ret;
-
-       mutex_lock(&dev->struct_mutex);
-
-       list_for_each_entry(entry, &bm->unfenced, head) {
-               BUG_ON(!entry->unfenced);
-               fence_flags |= entry->fence_flags;
-               count++;
-       }
-
-       if (!count) {
-               mutex_unlock(&dev->struct_mutex);
-               return 0;
-       }
-
-       fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
-
-       if (!fence) {
-               mutex_unlock(&dev->struct_mutex);
-               return -ENOMEM;
-       }
-
-       ret = drm_fence_object_init(dev, fence_flags, 1, fence);
-       if (ret) {
-               drm_free(fence, sizeof(*fence), DRM_MEM_FENCE);
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
-
-       list_for_each_entry_safe(entry, next, &bm->unfenced, head) {
-               BUG_ON(entry->fence);
-               entry->unfenced = 0;
-               entry->fence = fence;
-               list_del_init(&entry->head);
-
-               if (!(entry->flags & DRM_BO_FLAG_NO_EVICT)) {
-                       if (entry->flags & DRM_BO_FLAG_MEM_TT) {
-                               list_add_tail(&entry->head, &bm->tt_lru);
-                       } else if (entry->flags & DRM_BO_FLAG_MEM_VRAM) {
-                               list_add_tail(&entry->head, &bm->vram_lru);
-                       }
-               }
-       }
-
-       atomic_add(count - 1, &fence->usage);
-       mutex_unlock(&dev->struct_mutex);
-       return 0;
-}
-
 /*
  * bo locked.
  */
@@ -135,11 +76,12 @@ static int drm_move_tt_to_local(drm_buffer_object_t * buf)
        buf->tt = NULL;
        mutex_unlock(&dev->struct_mutex);
 
+       buf->flags &= ~DRM_BO_MASK_MEM;
+       buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+
        return 0;
 }
 
-
-
 /*
  * Lock dev->struct_mutex
  */
@@ -149,6 +91,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
 
        drm_buffer_manager_t *bm = &dev->bm;
 
+       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (bo->fence) {
                if (!drm_fence_object_signaled(bo->fence, bo->fence_flags)) {
                        drm_fence_object_flush(dev, bo->fence, bo->fence_flags);
@@ -158,7 +101,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
                                bm->timer.expires = jiffies + 1;
                                add_timer(&bm->timer);
                        }
-                               
+
                        return;
                } else {
                        drm_fence_usage_deref_locked(dev, bo->fence);
@@ -170,7 +113,8 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
         * Take away from lru lists.
         */
 
-       list_del_init(&bo->head);
+       list_del(&bo->head);
+       list_add_tail(&bo->head, &bm->other);
 
        if (bo->tt) {
                int ret;
@@ -190,10 +134,10 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
        drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
 }
 
-static void drm_bo_delayed_delete(drm_device_t *dev)
+static void drm_bo_delayed_delete(drm_device_t * dev)
 {
        drm_buffer_manager_t *bm = &dev->bm;
-       
+
        drm_buffer_object_t *entry, *next;
        drm_fence_object_t *fence;
 
@@ -202,7 +146,7 @@ static void drm_bo_delayed_delete(drm_device_t *dev)
        list_for_each_entry_safe(entry, next, &bm->ddestroy, ddestroy) {
                fence = entry->fence;
 
-               if (fence && drm_fence_object_signaled(fence, 
+               if (fence && drm_fence_object_signaled(fence,
                                                       entry->fence_flags)) {
                        drm_fence_usage_deref_locked(dev, fence);
                        entry->fence = NULL;
@@ -217,13 +161,11 @@ static void drm_bo_delayed_delete(drm_device_t *dev)
        mutex_unlock(&dev->struct_mutex);
 }
 
-
-static void
-drm_bo_delayed_timer(unsigned long data)
+static void drm_bo_delayed_timer(unsigned long data)
 {
-       drm_device_t *dev = (drm_device_t *)data;
+       drm_device_t *dev = (drm_device_t *) data;
        drm_buffer_manager_t *bm = &dev->bm;
-       
+
        drm_bo_delayed_delete(dev);
        mutex_lock(&dev->struct_mutex);
        if (!list_empty(&bm->ddestroy) && !timer_pending(&bm->timer)) {
@@ -233,7 +175,6 @@ drm_bo_delayed_timer(unsigned long data)
        mutex_unlock(&dev->struct_mutex);
 }
 
-
 void drm_bo_usage_deref_locked(drm_device_t * dev, drm_buffer_object_t * bo)
 {
        if (atomic_dec_and_test(&bo->usage)) {
@@ -258,6 +199,103 @@ void drm_bo_usage_deref_unlocked(drm_device_t * dev, drm_buffer_object_t * bo)
        }
 }
 
+int drm_fence_buffer_objects(drm_file_t * priv, 
+                            struct list_head *list, 
+                            drm_fence_object_t *fence)
+{
+       drm_device_t *dev = priv->head->dev;
+       drm_buffer_manager_t *bm = &dev->bm;
+
+       drm_buffer_object_t *entry;
+       uint32_t fence_flags = 0;
+       int count = 0;
+       int ret = 0;
+       struct list_head f_list, *l;
+
+       mutex_lock(&dev->struct_mutex);
+       
+       list_for_each_entry(entry, list, head) {
+               BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+               fence_flags |= entry->fence_flags;
+               count++;
+       }
+
+       if (!count)
+               goto out;
+
+       if (fence) {
+               if ((fence_flags & fence->type) != fence_flags) {
+                       DRM_ERROR("Given fence doesn't match buffers "
+                                 "on unfenced list.\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+       } else {
+               fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
+
+               if (!fence) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               ret = drm_fence_object_init(dev, fence_flags, 1, fence);
+
+               if (ret) {
+                       drm_free(fence, sizeof(*fence), DRM_MEM_FENCE);
+                       goto out;
+               }
+       }
+
+       /*
+        * Transfer to a private list before we release the dev->struct_mutex;
+        * This is so we don't get any new unfenced objects while fencing 
+        * these.
+        */
+
+       f_list = *list;
+       INIT_LIST_HEAD(list);
+
+       count = 0;
+       l = f_list.next;
+       while(l != &f_list) {
+               entry = list_entry(l, drm_buffer_object_t, head);
+               atomic_inc(&entry->usage);
+               mutex_unlock(&dev->struct_mutex);
+               mutex_lock(&entry->mutex);
+               mutex_lock(&dev->struct_mutex);
+
+               if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+                       count++;
+                       if (entry->fence) 
+                               drm_fence_usage_deref_locked(dev, entry->fence);
+                       entry->fence = fence;
+                       DRM_FLAG_MASKED(entry->priv_flags, 0, 
+                                       _DRM_BO_FLAG_UNFENCED);
+                       DRM_WAKEUP(&entry->event_queue);
+                       list_del_init(&entry->head);
+                       if (entry->flags & DRM_BO_FLAG_NO_EVICT)
+                               list_add_tail(&entry->head, &bm->other);
+                       else if (entry->flags & DRM_BO_FLAG_MEM_TT)
+                               list_add_tail(&entry->head, &bm->tt_lru);
+                       else if (entry->flags & DRM_BO_FLAG_MEM_VRAM)
+                               list_add_tail(&entry->head, &bm->vram_lru);
+                       else
+                               list_add_tail(&entry->head, &bm->other);
+               }
+               mutex_unlock(&entry->mutex);
+               drm_bo_usage_deref_locked(dev, entry);
+               l = f_list.next;
+       }
+       if (!count)
+               drm_fence_usage_deref_locked(dev, fence);
+       else if (count > 1)
+               atomic_add(count - 1, &fence->usage);
+      out:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
+
 /*
  * Call bo->mutex locked.
  * Wait until the buffer is idle.
@@ -269,6 +307,7 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int no_wait)
        drm_fence_object_t *fence = bo->fence;
        int ret;
 
+       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_flags)) {
@@ -299,13 +338,15 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int no_wait)
 static int drm_bo_evict(drm_buffer_object_t * bo, int tt, int no_wait)
 {
        int ret = 0;
-
+       drm_device_t *dev = bo->dev;
+       drm_buffer_manager_t *bm = &dev->bm;
        /*
         * Someone might have modified the buffer before we took the buffer mutex.
         */
 
        mutex_lock(&bo->mutex);
-       if (bo->unfenced || (bo->flags & DRM_BO_FLAG_NO_EVICT))
+       if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
+           || (bo->flags & DRM_BO_FLAG_NO_EVICT))
                goto out;
        if (tt && !bo->tt)
                goto out;
@@ -324,6 +365,12 @@ static int drm_bo_evict(drm_buffer_object_t * bo, int tt, int no_wait)
                ret = drm_move_vram_to_local(bo);
        }
 #endif
+       mutex_lock(&dev->struct_mutex);
+       list_del(&bo->head);
+       list_add_tail(&bo->head, &bm->other);
+       mutex_unlock(&dev->struct_mutex);
+       DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
+                       _DRM_BO_FLAG_EVICTED);
       out:
        mutex_unlock(&bo->mutex);
        return ret;
@@ -356,11 +403,6 @@ int drm_bo_alloc_space(drm_buffer_object_t * buf, int tt, int no_wait)
 
                bo = list_entry(lru->next, drm_buffer_object_t, head);
 
-               /*
-                * No need to take dev->struct_mutex here, because bo->usage is at least 1 already,
-                * since it's on the lru list, and the dev->struct_mutex is held.
-                */
-
                atomic_inc(&bo->usage);
                mutex_unlock(&dev->struct_mutex);
                ret = drm_bo_evict(bo, tt, no_wait);
@@ -407,13 +449,26 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
                drm_mm_put_block(&bm->tt_manager, bo->tt);
        }
        mutex_unlock(&dev->struct_mutex);
-       return ret;
+       if (ret)
+               return ret;
+
+       if (bo->ttm_region->be->needs_cache_adjust(bo->ttm_region->be))
+               bo->flags &= ~DRM_BO_FLAG_CACHED;
+       bo->flags &= ~DRM_BO_MASK_MEM;
+       bo->flags |= DRM_BO_FLAG_MEM_TT;
+
+       if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
+               ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
+               DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
+               DRM_ERROR("Warning: Could not flush read caches\n");
+       }
+
+       return 0;
 }
 
 static int drm_bo_new_flags(drm_bo_driver_t * driver,
                            uint32_t flags, uint32_t new_mask, uint32_t hint,
-                           int init, uint32_t * n_flags, 
-                           uint32_t *n_mask)
+                           int init, uint32_t * n_flags, uint32_t * n_mask)
 {
        uint32_t new_flags = 0;
        uint32_t new_props;
@@ -426,21 +481,23 @@ static int drm_bo_new_flags(drm_bo_driver_t * driver,
 
        if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
                if (((new_mask & DRM_BO_FLAG_MEM_TT) && !driver->cached_tt) &&
-                   ((new_mask & DRM_BO_FLAG_MEM_VRAM) && !driver->cached_vram)) {
+                   ((new_mask & DRM_BO_FLAG_MEM_VRAM)
+                    && !driver->cached_vram)) {
                        new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
                } else {
-                       if (!driver->cached_tt) 
+                       if (!driver->cached_tt)
                                new_flags &= DRM_BO_FLAG_MEM_TT;
-                       if (!driver->cached_vram) 
+                       if (!driver->cached_vram)
                                new_flags &= DRM_BO_FLAG_MEM_VRAM;
                }
        }
-                                                                                  
-       if ((new_mask & DRM_BO_FLAG_READ_CACHED) && 
+
+       if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
            !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
-               if ((new_mask & DRM_BO_FLAG_NO_EVICT) && 
+               if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
                    !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
-                       DRM_ERROR("Cannot read cached from a pinned VRAM / TT buffer\n");
+                       DRM_ERROR
+                           ("Cannot read cached from a pinned VRAM / TT buffer\n");
                        return -EINVAL;
                }
                new_mask &= ~(DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM);
@@ -449,7 +506,6 @@ static int drm_bo_new_flags(drm_bo_driver_t * driver,
        /*
         * Determine new memory location:
         */
-       
 
        if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {
 
@@ -544,6 +600,7 @@ static int drm_bo_busy(drm_buffer_object_t * bo)
 {
        drm_fence_object_t *fence = bo->fence;
 
+       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_flags)) {
@@ -562,12 +619,94 @@ static int drm_bo_busy(drm_buffer_object_t * bo)
        return 0;
 }
 
+static int drm_bo_read_cached(drm_buffer_object_t * bo)
+{
+       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
+                       _DRM_BO_FLAG_EVICTED);
+       return 0;
+}
+
+/*
+ * Wait until a buffer is unmapped.
+ */
+
+static int drm_bo_wait_unmapped(drm_buffer_object_t *bo, int no_wait)
+{
+       int ret = 0; 
+
+       if ((atomic_read(&bo->mapped) >= 0) && no_wait)
+               return -EBUSY;
+
+       DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
+                   atomic_read(&bo->mapped) == -1);
+
+       if (ret == -EINTR)
+               ret = -EAGAIN;
+       
+       return ret;
+}
+
+static int drm_bo_check_unfenced(drm_buffer_object_t *bo)
+{
+       int ret;
+
+       mutex_lock(&bo->mutex);
+       ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       mutex_unlock(&bo->mutex);
+       return ret;
+}
+
+/*
+ * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
+ * Until then, we cannot really do anything with it except delete it.
+ * The unfenced list is a PITA, and the operations
+ * 1) validating
+ * 2) submitting commands
+ * 3) fencing
+ * Should really be an atomic operation. 
+ * We now "solve" this problem by keeping
+ * the buffer "unfenced" after validating, but before fencing.
+ */
+
+static int drm_bo_wait_unfenced(drm_buffer_object_t *bo, int no_wait, 
+                               int eagain_if_wait)
+{
+       int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       unsigned long _end = jiffies + 3*DRM_HZ;
+
+       if (ret && no_wait)
+               return -EBUSY;
+       else if (!ret)
+               return 0;
+            
+       do {
+               mutex_unlock(&bo->mutex);
+               DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
+                           !drm_bo_check_unfenced(bo));
+               mutex_lock(&bo->mutex);
+               if (ret == -EINTR)
+                       return -EAGAIN;
+               if (ret) {
+                       DRM_ERROR("Error waiting for buffer to become fenced\n");
+                       return ret;
+               }
+               ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       } while (ret && !time_after_eq(jiffies, _end));
+       if (ret) {
+               DRM_ERROR("Timeout waiting for buffer to become fenced\n");
+               return ret;
+       }
+       if (eagain_if_wait)
+               return -EAGAIN;
 
-static int drm_bo_read_cached(drm_buffer_object_t *bo) {
        return 0;
 }
 
 
+
+
+
 /*
  * Wait for buffer idle and register that we've mapped the buffer.
  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, 
@@ -575,7 +714,7 @@ static int drm_bo_read_cached(drm_buffer_object_t *bo) {
  * unregistered.
  */
 
-static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, 
+static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
                                 uint32_t map_flags, int no_wait)
 {
        drm_buffer_object_t *bo;
@@ -590,6 +729,9 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
                return -EINVAL;
 
        mutex_lock(&bo->mutex);
+       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+       if (ret)
+               goto out;
 
        /*
         * If this returns true, we are currently unmapped.
@@ -597,7 +739,7 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
         * be done without the bo->mutex held.
         */
 
-       while(1) {
+       while (1) {
                if (atomic_inc_and_test(&bo->mapped)) {
                        ret = drm_bo_wait(bo, 0, no_wait);
                        if (ret) {
@@ -608,41 +750,33 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
                        if ((map_flags & DRM_BO_FLAG_READ) &&
                            (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
                            (!(bo->flags & DRM_BO_FLAG_CACHED))) {
-                               
                                drm_bo_read_cached(bo);
                        }
                        break;
-               } else {
-                       if ((map_flags & DRM_BO_FLAG_READ) &&
-                           (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
-                           (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+               } else if ((map_flags & DRM_BO_FLAG_READ) &&
+                          (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
+                          (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+                       
+                       /*
+                        * We are already mapped with different flags.
+                        * need to wait for unmap.
+                        */
+                       
+                       ret = drm_bo_wait_unmapped(bo, no_wait);
+                       if (ret)
+                               goto out;
 
-                               /*
-                                * We are already mapped with different flags.
-                                * need to wait for unmap.
-                                */
-                               
-                               if (no_wait) {
-                                       ret = -EBUSY;
-                                       goto out;
-                               }
-                               DRM_WAIT_ON(ret, bo->validate_queue, 3 * DRM_HZ,
-                                           atomic_read(&bo->mapped) == -1);
-                               if (ret == -EINTR)
-                                       ret = -EAGAIN;
-                               if (ret) 
-                                       goto out;
-                               continue;
-                       } 
+                       continue;
                }
+               break;
        }
-       
+
        mutex_lock(&dev->struct_mutex);
        ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                if (atomic_add_negative(-1, &bo->mapped))
-                       DRM_WAKEUP(&bo->validate_queue);
+                       DRM_WAKEUP(&bo->event_queue);
 
        }
       out:
@@ -698,7 +832,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv,
        BUG_ON(action != _DRM_REF_TYPE1);
 
        if (atomic_add_negative(-1, &bo->mapped))
-               DRM_WAKEUP(&bo->validate_queue);
+               DRM_WAKEUP(&bo->event_queue);
 }
 
 /*
@@ -720,18 +854,9 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
         * Make sure we're not mapped.
         */
 
-       if (atomic_read(&bo->mapped) >= 0) {
-               if (no_wait)
-                       return -EBUSY;
-               else {
-                       DRM_WAIT_ON(ret, bo->validate_queue, 3 * DRM_HZ,
-                                   atomic_read(&bo->mapped) == -1);
-                       if (ret == -EINTR)
-                               return -EAGAIN;
-                       if (ret)
-                               return ret;
-               }
-       }
+       ret = drm_bo_wait_unmapped(bo, no_wait);
+       if (ret)
+               return ret;
 
        /*
         * Wait for outstanding fences.
@@ -752,8 +877,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
                drm_move_tt_to_local(bo);
        }
 
-       DRM_FLAG_MASKED(bo->flags, new_flags, DRM_BO_FLAG_MEM_TT);
-
        return 0;
 }
 
@@ -768,6 +891,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        uint32_t flag_diff = (new_flags ^ bo->flags);
+       drm_bo_driver_t *driver = dev->driver->bo_driver;
 
        int ret;
 
@@ -785,6 +909,12 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
         * Check whether we need to move buffer.
         */
 
+       ret = driver->fence_type(new_flags, &bo->fence_flags, &bo->fence_class);
+       if (ret) {
+               DRM_ERROR("Driver did not support given buffer permissions\n");
+               return ret;
+       }
+
        if (flag_diff & DRM_BO_MASK_MEM) {
                mutex_lock(&dev->struct_mutex);
                ret = drm_bo_move_buffer(bo, new_flags, no_wait);
@@ -793,40 +923,72 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
                        return ret;
        }
 
-       if (flag_diff & DRM_BO_FLAG_NO_EVICT) {
-               mutex_lock(&dev->struct_mutex);
-               list_del_init(&bo->head);
-               if (!(new_flags & DRM_BO_FLAG_NO_EVICT)) {
-                       if (new_flags & DRM_BO_FLAG_MEM_TT) {
-                               list_add_tail(&bo->head, &bm->tt_lru);
-                       } else {
-                               list_add_tail(&bo->head, &bm->vram_lru);
-                       }
-               }
-               mutex_unlock(&dev->struct_mutex);
-               DRM_FLAG_MASKED(bo->flags, new_flags, DRM_BO_FLAG_NO_EVICT);
-       }
-
        if (move_unfenced) {
 
                /*
                 * Place on unfenced list.
                 */
 
+               DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+                               _DRM_BO_FLAG_UNFENCED);
                mutex_lock(&dev->struct_mutex);
-               list_del_init(&bo->head);
+               list_del(&bo->head);
                list_add_tail(&bo->head, &bm->unfenced);
                mutex_unlock(&dev->struct_mutex);
+       } else {
+               mutex_lock(&dev->struct_mutex);
+               list_del(&bo->head);
+               if (new_flags & DRM_BO_FLAG_NO_EVICT)
+                       list_add_tail(&bo->head, &bm->other);
+               else if (new_flags & DRM_BO_FLAG_MEM_TT)
+                       list_add_tail(&bo->head, &bm->tt_lru);
+               else if (new_flags & DRM_BO_FLAG_MEM_VRAM)
+                       list_add_tail(&bo->head, &bm->vram_lru);
+               else
+                       list_add_tail(&bo->head, &bm->other);
+               DRM_FLAG_MASKED(bo->flags, new_flags, DRM_BO_FLAG_NO_EVICT);
        }
 
-       /*
-        * FIXME: Remove below.
-        */
-
        bo->flags = new_flags;
        return 0;
 }
 
+static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
+                                 uint32_t flags, uint32_t mask, uint32_t hint)
+{
+       drm_buffer_object_t *bo;
+       drm_device_t *dev = priv->head->dev;
+       int ret;
+       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+       uint32_t new_flags;
+
+       bo = drm_lookup_buffer_object(priv, handle, 1);
+       if (!bo) {
+               return -EINVAL;
+       }
+
+       mutex_lock(&bo->mutex);
+       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+
+       if (ret)
+               goto out;
+
+       ret = drm_bo_new_flags(dev->driver->bo_driver, bo->flags, 
+                              (flags & mask) | (bo->flags & ~mask), hint,
+                              0, &new_flags, &bo->mask);
+
+       if (ret)
+               goto out;
+
+       ret = drm_buffer_object_validate(bo, new_flags, !(hint & DRM_BO_HINT_DONT_FENCE), 
+                                        no_wait);
+       
+out:                       
+       mutex_unlock(&bo->mutex);
+       drm_bo_usage_deref_unlocked(dev, bo);
+       return ret;
+}
+
 /*
  * Call bo->mutex locked.
  */
@@ -886,6 +1048,10 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo,
        return ret;
 }
 
+
+
+
+
 int drm_buffer_object_create(drm_file_t * priv,
                             unsigned long size,
                             drm_bo_type_t type,
@@ -896,6 +1062,7 @@ int drm_buffer_object_create(drm_file_t * priv,
                             drm_buffer_object_t ** buf_obj)
 {
        drm_device_t *dev = priv->head->dev;
+       drm_buffer_manager_t *bm = &dev->bm;
        drm_buffer_object_t *bo;
        int ret = 0;
        uint32_t new_flags;
@@ -922,13 +1089,15 @@ int drm_buffer_object_create(drm_file_t * priv,
 
        atomic_set(&bo->usage, 1);
        atomic_set(&bo->mapped, -1);
-       DRM_INIT_WAITQUEUE(&bo->validate_queue);
+       DRM_INIT_WAITQUEUE(&bo->event_queue);
        INIT_LIST_HEAD(&bo->head);
+       list_add_tail(&bo->head, &bm->other);
        INIT_LIST_HEAD(&bo->ddestroy);
        bo->dev = dev;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->buffer_start = buffer_start;
+       bo->priv_flags = 0;
 
        ret = drm_bo_new_flags(dev->driver->bo_driver, bo->flags, mask, hint,
                               1, &new_flags, &bo->mask);
@@ -938,12 +1107,12 @@ int drm_buffer_object_create(drm_file_t * priv,
        if (ret)
                goto out_err;
 
-#if 0
+#if 1
        ret = drm_buffer_object_validate(bo, new_flags, 0,
                                         hint & DRM_BO_HINT_DONT_BLOCK);
 #else
        bo->flags = new_flags;
-#endif 
+#endif
        if (ret)
                goto out_err;
 
@@ -996,6 +1165,12 @@ static void drm_bo_fill_rep_arg(const drm_buffer_object_t * bo,
        rep->buffer_start = bo->buffer_start;
 }
 
+static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
+{
+       LOCK_TEST_WITH_RETURN(dev, filp);
+       return 0;
+}
+
 int drm_bo_ioctl(DRM_IOCTL_ARGS)
 {
        DRM_DEVICE;
@@ -1005,43 +1180,43 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
        unsigned long next;
        drm_user_object_t *uo;
        drm_buffer_object_t *entry;
-
+       
        do {
-               DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, 
-                                        sizeof(arg));
-               
+               DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
                if (arg.handled) {
-                   data = req->next;
-                   continue;
+                       data = req->next;
+                       continue;
                }
 
                rep.ret = 0;
                switch (req->op) {
-               case drm_bo_create:{
-                               rep.ret =
-                                   drm_buffer_object_create(priv, req->size,
-                                                            req->type,
-                                                            req->arg_handle,
-                                                            req->mask,
-                                                            req->hint,
-                                                            req->buffer_start,
-                                                            &entry);
-                               if (rep.ret)
-                                       break;
-
-                               rep.ret =
-                                   drm_bo_add_user_object(priv, entry,
-                                                          req->
-                                                          mask &
-                                                          DRM_BO_FLAG_SHAREABLE);
-                               if (rep.ret)
-                                       drm_bo_usage_deref_unlocked(dev, entry);
-
-                               mutex_lock(&entry->mutex);
-                               drm_bo_fill_rep_arg(entry, &rep);
-                               mutex_unlock(&entry->mutex);
+               case drm_bo_create:
+                       rep.ret =
+                           drm_buffer_object_create(priv, req->size,
+                                                    req->type,
+                                                    req->arg_handle,
+                                                    req->mask,
+                                                    req->hint,
+                                                    req->buffer_start, &entry);
+                       if (rep.ret)
                                break;
-                       }
+
+                       rep.ret =
+                           drm_bo_add_user_object(priv, entry,
+                                                  req->
+                                                  mask &
+                                                  DRM_BO_FLAG_SHAREABLE);
+                       if (rep.ret)
+                               drm_bo_usage_deref_unlocked(dev, entry);
+
+                       if (rep.ret)
+                               break;
+
+                       mutex_lock(&entry->mutex);
+                       drm_bo_fill_rep_arg(entry, &rep);
+                       mutex_unlock(&entry->mutex);
+                       break;
                case drm_bo_unmap:
                        rep.ret = drm_buffer_object_unmap(priv, req->handle);
                        break;
@@ -1083,6 +1258,20 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
                        rep.ret = drm_user_object_unref(priv, req->handle,
                                                        drm_buffer_type);
                        break;
+               case drm_bo_validate:
+                       rep.ret = drm_bo_lock_test(dev, filp);
+                       if (rep.ret)
+                               break;
+                       rep.ret =
+                           drm_bo_handle_validate(priv, req->handle, req->mask,
+                                                  req->arg_handle, req->hint);
+                       break;
+               case drm_bo_fence:
+                       rep.ret = drm_bo_lock_test(dev, filp);
+                       if (rep.ret)
+                               break;
+                       /**/
+                       break;
                default:
                        rep.ret = -EINVAL;
                }
@@ -1104,12 +1293,12 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
        return 0;
 }
 
-
-
-static void drm_bo_clean_mm(drm_file_t * priv)
+int drm_bo_clean_mm(drm_file_t *priv)
 {
+       return 0;
 }
 
+
 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
 {
        DRM_DEVICE;
@@ -1120,7 +1309,7 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
        drm_bo_driver_t *driver = dev->driver->bo_driver;
 
        if (!driver) {
-               DRM_ERROR("Buffer objects is not supported by this driver\n");
+               DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }
 
@@ -1164,11 +1353,12 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
                INIT_LIST_HEAD(&bm->tt_lru);
                INIT_LIST_HEAD(&bm->unfenced);
                INIT_LIST_HEAD(&bm->ddestroy);
+               INIT_LIST_HEAD(&bm->other);
 
                init_timer(&bm->timer);
                bm->timer.function = &drm_bo_delayed_timer;
-               bm->timer.data = (unsigned long) dev;
-               
+               bm->timer.data = (unsigned long)dev;
+
                bm->initialized = 1;
                break;
        case mm_takedown:
@@ -1185,6 +1375,7 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
                bm->initialized = 0;
                break;
        default:
+               DRM_ERROR("Function not implemented yet\n");
                return -EINVAL;
        }
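
A note on the flag handling in drm_bo_handle_validate() above: the caller's mask selects which flag bits are being changed, and everything outside the mask keeps the buffer's current value, since the new flags are computed as (flags & mask) | (bo->flags & ~mask). A worked example with illustrative values:

    /*
     *   bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ;
     *   flags     = DRM_BO_FLAG_MEM_TT;
     *   mask      = DRM_BO_MASK_MEM;
     *
     *   (flags & mask)       == DRM_BO_FLAG_MEM_TT
     *   (bo->flags & ~mask)  == DRM_BO_FLAG_READ
     *   result               == DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ
     *
     * i.e. the memory-placement bits are replaced while the access bits
     * are carried over unchanged.
     */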
 
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index bedbd41..ecc6cf8 100644
@@ -30,6 +30,8 @@
  */
 
 #include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
 
 drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev, int cached)
 {
@@ -38,3 +40,25 @@ drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev, int cached)
        else
                return drm_agp_init_ttm_uncached(dev);
 }
+
+int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type)
+{
+       *class = 0;
+       if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) 
+               *type = 1;
+       else
+               *type = 3;
+       return 0;
+}
+
+int i915_invalidate_caches(drm_device_t *dev, uint32_t flags)
+{
+       uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
+
+       if (flags & DRM_BO_FLAG_READ)
+               flush_cmd |= MI_READ_FLUSH;
+       if (flags & DRM_BO_FLAG_EXE)
+               flush_cmd |= MI_EXE_FLUSH;
+
+       return i915_emit_mi_flush(dev, flush_cmd);
+}
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index bc78dc2..fb4754d 100644
@@ -51,7 +51,9 @@ static drm_fence_driver_t i915_fence_driver = {
 static drm_bo_driver_t i915_bo_driver = {
        .cached_vram = 0,
        .cached_tt = 1,
-       .create_ttm_backend_entry = i915_create_ttm_backend_entry
+       .create_ttm_backend_entry = i915_create_ttm_backend_entry,
+       .fence_type = i915_fence_types,
+       .invalidate_caches = i915_invalidate_caches
 };
 
 
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 452d4ee..673ebd0 100644
@@ -121,3 +121,4 @@ void i915_fence_handler(drm_device_t * dev)
        i915_perform_flush(dev);
        write_unlock(&fm->lock);
 }
+
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 9640855..f76fd86 100644
@@ -723,7 +723,15 @@ typedef struct drm_ttm_arg {
 /* When creating a buffer, Avoid system storage even if allowed */
 #define DRM_BO_HINT_AVOID_LOCAL 0x00000001
 /* Don't block on validate and map */
-#define DRM_BO_HINT_DONT_BLOCK  0x00000002  
+#define DRM_BO_HINT_DONT_BLOCK  0x00000002
+/* Don't place this buffer on the unfenced list.*/
+#define DRM_BO_HINT_DONT_FENCE  0x00000004
+
+
+
+
+/* Driver specific flags. Could be for example rendering engine */  
+#define DRM_BO_MASK_DRIVER      0x00F00000
 
 typedef enum {
        drm_bo_type_ttm,
@@ -779,7 +787,9 @@ typedef union drm_mm_init_arg{
                enum {
                        mm_init,
                        mm_takedown,
-                       mm_query
+                       mm_query,
+                       mm_lock,
+                       mm_unlock
                } op;
                drm_u64_t vr_p_offset;
                drm_u64_t vr_p_size;
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index e661269..baeab38 100644
@@ -444,6 +444,28 @@ static void i915_emit_breadcrumb(drm_device_t *dev)
 #endif
 }
 
+
+int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       uint32_t flush_cmd = CMD_MI_FLUSH;
+       RING_LOCALS;
+
+       flush_cmd |= flush;
+
+       i915_kernel_lost_context(dev);
+
+       BEGIN_LP_RING(4);
+       OUT_RING(flush_cmd);
+       OUT_RING(0);
+       OUT_RING(0);
+       OUT_RING(0);
+       ADVANCE_LP_RING();
+
+       return 0;
+}
+
+
 static int i915_dispatch_cmdbuffer(drm_device_t * dev,
                                   drm_i915_cmdbuffer_t * cmd)
 {
@@ -680,6 +702,7 @@ static int i915_flip_bufs(DRM_IOCTL_ARGS)
        return i915_dispatch_flip(dev);
 }
 
+
 static int i915_getparam(DRM_IOCTL_ARGS)
 {
        DRM_DEVICE;
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 403124c..8bf82ed 100644
@@ -124,6 +124,8 @@ extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
 extern int i915_driver_device_is_agp(drm_device_t * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
+extern int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush);
+
 
 /* i915_irq.c */
 extern int i915_irq_emit(DRM_IOCTL_ARGS);
@@ -158,6 +160,8 @@ extern void i915_sync_flush(drm_device_t *dev);
 /* i915_buffer.c */
 extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev, 
        int cached);
+extern int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type);
+extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
 #endif
 
 #define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
@@ -209,6 +213,11 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
 #define INST_OP_FLUSH        0x02000000
 #define INST_FLUSH_MAP_CACHE 0x00000001
 
+#define CMD_MI_FLUSH         (0x04 << 23)
+#define MI_NO_WRITE_FLUSH    (1 << 2)
+#define MI_READ_FLUSH        (1 << 0)
+#define MI_EXE_FLUSH         (1 << 1)
+
 #define BB1_START_ADDR_MASK   (~0x7)
 #define BB1_PROTECTED         (1<<0)
 #define BB1_UNPROTECTED       (0<<0)