Various bugfixes: access the drm_bo_arg request/reply through the named union member, replace the delayed buffer-object destroy timer with a delayed workqueue, handle drm_bo_type_fake buffers in the create and map paths, allocate small TTM page tables with kmalloc and fall back to vmalloc, hide verbose debug output behind BODEBUG, and fix i915 fence IRQ enable/disable tracking.
author	Thomas Hellstrom <thomas@tungstengraphics.com>
	Fri, 8 Sep 2006 15:24:38 +0000 (17:24 +0200)
committer	Thomas Hellstrom <thomas@tungstengraphics.com>
	Fri, 8 Sep 2006 15:24:38 +0000 (17:24 +0200)
12 files changed:
libdrm/xf86drm.c
linux-core/drmP.h
linux-core/drm_agpsupport.c
linux-core/drm_bo.c
linux-core/drm_compat.c
linux-core/drm_fence.c
linux-core/drm_ttm.c
linux-core/drm_ttm.h
linux-core/i915_fence.c
shared-core/drm.h
shared-core/i915_drv.h
shared-core/i915_irq.c

diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index dd97e26..298b812 100644
@@ -2512,6 +2512,7 @@ int drmBOResetList(drmBOList *list) {
        DRMLISTDEL(l);
        DRMLISTADD(l, &list->free);
        list->numOnList--;
+       l = list->list.next;
     }
     return drmAdjustListNodes(list);
 }
@@ -2603,8 +2604,8 @@ int drmBOCreate(int fd, drmTTM *ttm, unsigned long start, unsigned long size,
                unsigned hint, drmBO *buf)
 {
     drm_bo_arg_t arg;
-    drm_bo_arg_request_t *req = &arg.req;
-    drm_bo_arg_reply_t *rep = &arg.rep;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
 
     arg.handled = 0;
     req->mask = mask;
@@ -2628,6 +2629,9 @@ int drmBOCreate(int fd, drmTTM *ttm, unsigned long start, unsigned long size,
        req->buffer_start = (unsigned long) user_buffer;
        buf->virtual = user_buffer;
        break;
+    case drm_bo_type_fake:
+        req->buffer_start = start;
+       break;
     default:
        return -EINVAL;
     }
@@ -2654,10 +2658,10 @@ int drmBOCreate(int fd, drmTTM *ttm, unsigned long start, unsigned long size,
 int drmBODestroy(int fd, drmBO *buf)
 {
     drm_bo_arg_t arg;
-    drm_bo_arg_request_t *req = &arg.req;
-    drm_bo_arg_reply_t *rep = &arg.rep;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
     
-    if (buf->mapVirtual) {
+    if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
        (void) drmUnmap(buf->mapVirtual, buf->start + buf->size);
        buf->mapVirtual = NULL;
        buf->virtual = NULL;
@@ -2685,8 +2689,8 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
 {
 
     drm_bo_arg_t arg;
-    drm_bo_arg_request_t *req = &arg.req;
-    drm_bo_arg_reply_t *rep = &arg.rep;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
     
     arg.handled = 0;
     req->handle = handle;
@@ -2714,9 +2718,16 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
 int drmBOUnReference(int fd, drmBO *buf)
 {
     drm_bo_arg_t arg;
-    drm_bo_arg_request_t *req = &arg.req;
-    drm_bo_arg_reply_t *rep = &arg.rep;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
     
+
+    if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
+       (void) drmUnmap(buf->mapVirtual, buf->start + buf->size);
+       buf->mapVirtual = NULL;
+       buf->virtual = NULL;
+    }
+
     arg.handled = 0;
     req->handle = buf->handle;
     req->op = drm_bo_unreference;
@@ -2746,23 +2757,25 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
 {
 
     drm_bo_arg_t arg;
-    drm_bo_arg_request_t *req = &arg.req;
-    drm_bo_arg_reply_t *rep = &arg.rep;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
     int ret = 0;
 
     /*
      * Make sure we have a virtual address of the buffer.
      */
 
-    if (!buf->virtual) {
+    if (!buf->virtual && buf->type != drm_bo_type_fake) {
        drmAddress virtual;
        ret = drmMap(fd, buf->mapHandle, buf->size + buf->start, &virtual);
        if (ret)
            return ret;
        buf->mapVirtual = virtual;
        buf->virtual = ((char *) virtual) + buf->start;
+#ifdef BODEBUG
        fprintf(stderr,"Mapvirtual, virtual: 0x%08x 0x%08x\n", 
                buf->mapVirtual, buf->virtual);
+#endif
     }
 
     arg.handled = 0;
@@ -2799,8 +2812,8 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
 int drmBOUnmap(int fd, drmBO *buf)
 {
     drm_bo_arg_t arg;
-    drm_bo_arg_request_t *req = &arg.req;
-    drm_bo_arg_reply_t *rep = &arg.rep;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
 
        
     arg.handled = 0;
@@ -2823,8 +2836,8 @@ int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
                  unsigned hint)
 {
     drm_bo_arg_t arg;
-    drm_bo_arg_request_t *req = &arg.req;
-    drm_bo_arg_reply_t *rep = &arg.rep;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
     int ret = 0;
 
     arg.handled = 0;
@@ -2856,8 +2869,8 @@ int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
 int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
 {
     drm_bo_arg_t arg;
-    drm_bo_arg_request_t *req = &arg.req;
-    drm_bo_arg_reply_t *rep = &arg.rep;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
     int ret = 0;
 
     arg.handled = 0;
@@ -2881,8 +2894,8 @@ int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
 int drmBOInfo(int fd, drmBO *buf)
 {
     drm_bo_arg_t arg;
-    drm_bo_arg_request_t *req = &arg.req;
-    drm_bo_arg_reply_t *rep = &arg.rep;
+    drm_bo_arg_request_t *req = &arg.d.req;
+    drm_bo_arg_reply_t *rep = &arg.d.rep;
     int ret = 0;
 
     arg.handled = 0;
@@ -2963,6 +2976,7 @@ int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
        cur->arg1 |= mask;
        cur->arg0 = (memFlags & flags) | ((cur->arg0 | flags) & cur->arg1);     
     }
+    return 0;
 }
 
 
@@ -2984,7 +2998,7 @@ int drmBOValidateList(int fd, drmBOList *list)
       node = DRMLISTENTRY(drmBONode, l, head);
 
       arg = &node->bo_arg;
-      req = &arg->req;
+      req = &arg->d.req;
 
       if (!first)
          first = arg;
@@ -2999,14 +3013,20 @@ int drmBOValidateList(int fd, drmBOList *list)
       req->op = drm_bo_validate;
       req->mask = node->arg0;
       req->hint = 0;
-      req->arg_handle = node->arg1;
+      req->arg_handle = node->arg1 | DRM_BO_MASK_MEM;
+#ifdef BODEBUG
+      fprintf(stderr, "Offset 0x%08x, Handle 0x%08x, " 
+                      "mask 0x%08x flags 0x%08x\n",
+                      node->buf->offset, 
+                      req->handle, req->arg_handle, req->mask);
+#endif
   }
   
   if (!first) 
       return 0;
 
   do{
-      ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+      ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
   } while (ret && errno == -EAGAIN);
 
 
@@ -3015,9 +3035,8 @@ int drmBOValidateList(int fd, drmBOList *list)
   
   for (l = list->list.next; l != &list->list; l = l->next) {
       node = DRMLISTENTRY(drmBONode, l, head);
-
       arg = &node->bo_arg;
-      rep = &arg->rep;
+      rep = &arg->d.rep;
       
       if (!arg->handled)
          return -EFAULT;
@@ -3026,6 +3045,10 @@ int drmBOValidateList(int fd, drmBOList *list)
 
       buf = node->buf;
       drmBOCopyReply(rep, buf);
+#ifdef BODEBUG
+      fprintf(stderr,"Offset 0x%08x, Flags 0x%08x\n", 
+             buf->offset, buf->flags);      
+#endif
   }
 
   return 0;
@@ -3051,7 +3074,7 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
       node = DRMLISTENTRY(drmBONode, l, head);
 
       arg = &node->bo_arg;
-      req = &arg->req;
+      req = &arg->d.req;
 
       if (!first)
          first = arg;
@@ -3071,7 +3094,7 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
   if (!first) 
       return 0;
 
-  ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+  ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
 
   if (ret)
       return -errno;
@@ -3080,7 +3103,7 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
       node = DRMLISTENTRY(drmBONode, l, head);
 
       arg = &node->bo_arg;
-      rep = &arg->rep;
+      rep = &arg->d.rep;
       
       if (!arg->handled)
          return -EFAULT;
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 7de7422..da14bdf 100644
@@ -801,7 +801,7 @@ typedef struct drm_buffer_manager{
        struct list_head unfenced;
        struct list_head ddestroy;
         struct list_head other;
-        struct timer_list timer;
+        struct work_struct wq;
         uint32_t fence_flags;
 } drm_buffer_manager_t;
 
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index e7226f1..60ebc56 100644
@@ -630,6 +630,7 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
                }
                agp_free_memory(mem);
        }
+
        agp_priv->mem = NULL;
 }
 
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 68af5c3..74722b1 100644
@@ -98,10 +98,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
                        drm_fence_object_flush(dev, bo->fence, bo->fence_flags);
                        list_add_tail(&bo->ddestroy, &bm->ddestroy);
 
-                       if (!timer_pending(&bm->timer)) {
-                               bm->timer.expires = jiffies + 1;
-                               add_timer(&bm->timer);
-                       }
+                       schedule_delayed_work(&bm->wq, 2);
 
                        return;
                } else {
@@ -109,15 +106,14 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
                        bo->fence = NULL;
                }
        }
-
        /*
         * Take away from lru lists.
         */
 
-       list_del(&bo->head);
+       list_del_init(&bo->head);
 
        if (bo->tt) {
-               drm_unbind_ttm_region(bo->ttm_region);
+               drm_unbind_ttm_region(bo->ttm_region);
                drm_mm_put_block(&bm->tt_manager, bo->tt);
                bo->tt = NULL;
        }
@@ -152,7 +148,9 @@ static void drm_bo_delayed_delete(drm_device_t * dev)
                        entry->fence = NULL;
                }
                if (!entry->fence) {
-                       DRM_DEBUG("Destroying delayed buffer object\n");
+#ifdef BODEBUG
+                       DRM_ERROR("Destroying delayed buffer object\n");
+#endif
                        list_del(&entry->ddestroy);
                        drm_bo_destroy_locked(dev, entry);
                }
@@ -161,16 +159,18 @@ static void drm_bo_delayed_delete(drm_device_t * dev)
        mutex_unlock(&dev->struct_mutex);
 }
 
-static void drm_bo_delayed_timer(unsigned long data)
+static void drm_bo_delayed_workqueue(void *data)
 {
        drm_device_t *dev = (drm_device_t *) data;
        drm_buffer_manager_t *bm = &dev->bm;
 
+#ifdef BODEBUG
+       DRM_ERROR("Delayed delete Worker\n");
+#endif
        drm_bo_delayed_delete(dev);
-       mutex_lock(&dev->struct_mutex);
-       if (!list_empty(&bm->ddestroy) && !timer_pending(&bm->timer)) {
-               bm->timer.expires = jiffies + 1;
-               add_timer(&bm->timer);
+       mutex_lock(&dev->struct_mutex); 
+       if (!list_empty(&bm->ddestroy)) {
+               schedule_delayed_work(&bm->wq, 2);
        }
        mutex_unlock(&dev->struct_mutex);
 }
@@ -220,14 +220,29 @@ int drm_fence_buffer_objects(drm_file_t * priv,
 
        mutex_lock(&dev->struct_mutex);
 
+       if (!list)
+               list = &bm->unfenced;
+
        list_for_each_entry(entry, list, head) {
                BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
                fence_flags |= entry->fence_flags;
                count++;
        }
 
-       if (!count)
+       if (!count) {
+               DRM_ERROR("No buffers to fence\n");
+               ret = -EINVAL;
                goto out;
+       }
+
+       /*
+        * Transfer to a local list before we release the dev->struct_mutex;
+        * This is so we don't get any new unfenced objects while fencing 
+        * these.
+        */
+
+       list_add_tail(&f_list, list);
+       list_del_init(list);
 
        if (fence) {
                if ((fence_flags & fence->type) != fence_flags) {
@@ -237,20 +252,13 @@ int drm_fence_buffer_objects(drm_file_t * priv,
                        goto out;
                }
        } else {
+               mutex_unlock(&dev->struct_mutex);
                ret = drm_fence_object_create(dev, fence_flags, 1, &fence);
+               mutex_lock(&dev->struct_mutex);
                if (ret)
                        goto out;               
        }
 
-       /*
-        * Transfer to a private list before we release the dev->struct_mutex;
-        * This is so we don't get any new unfenced objects while fencing 
-        * these.
-        */
-
-       f_list = *list;
-       INIT_LIST_HEAD(list);
-
        count = 0;
        l = f_list.next;
        while (l != &f_list) {
@@ -259,7 +267,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                mutex_lock(&dev->struct_mutex);
-
+               list_del_init(l);
                if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
                        count++;
                        if (entry->fence)
@@ -268,7 +276,6 @@ int drm_fence_buffer_objects(drm_file_t * priv,
                        DRM_FLAG_MASKED(entry->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                        DRM_WAKEUP(&entry->event_queue);
-                       list_del_init(&entry->head);
                        if (entry->flags & DRM_BO_FLAG_NO_EVICT)
                                list_add_tail(&entry->head, &bm->other);
                        else if (entry->flags & DRM_BO_FLAG_MEM_TT)
@@ -277,12 +284,19 @@ int drm_fence_buffer_objects(drm_file_t * priv,
                                list_add_tail(&entry->head, &bm->vram_lru);
                        else
                                list_add_tail(&entry->head, &bm->other);
+               } else {
+#ifdef BODEBUG
+                   DRM_ERROR("Huh? Fenced object on unfenced list\n");
+#endif
                }
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_locked(dev, entry);
                l = f_list.next;
        }
        atomic_add(count, &fence->usage);
+#ifdef BODEBUG
+       DRM_ERROR("Fenced %d buffers\n", count);
+#endif
       out:
        mutex_unlock(&dev->struct_mutex);
        *used_fence = fence;
@@ -303,7 +317,6 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
        drm_fence_object_t *fence = bo->fence;
        int ret;
 
-       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_flags)) {
@@ -424,6 +437,7 @@ int drm_bo_alloc_space(drm_buffer_object_t * buf, int tt, int no_wait)
        } else {
                buf->vram = node;
        }
+       buf->offset = node->start * PAGE_SIZE;
        return 0;
 }
 
@@ -431,6 +445,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
 {
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;
+       drm_ttm_backend_t *be;
        int ret;
 
        BUG_ON(bo->tt);
@@ -450,7 +465,8 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
        if (ret)
                return ret;
 
-       if (bo->ttm_region->be->needs_cache_adjust(bo->ttm_region->be))
+       be = bo->ttm_region->be;
+       if (be->needs_cache_adjust(be))
                bo->flags &= ~DRM_BO_FLAG_CACHED;
        bo->flags &= ~DRM_BO_MASK_MEM;
        bo->flags |= DRM_BO_FLAG_MEM_TT;
@@ -458,7 +474,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
        if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
                ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
                if (ret)
-                       DRM_ERROR("Warning: Could not flush read caches\n");
+                       DRM_ERROR("Could not flush read caches\n");
        }
        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
 
@@ -776,12 +792,13 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
  */
 
 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
-                                uint32_t map_flags, int no_wait,
+                                uint32_t map_flags, unsigned hint,
                                 drm_bo_arg_reply_t * rep)
 {
        drm_buffer_object_t *bo;
        drm_device_t *dev = priv->head->dev;
        int ret = 0;
+       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
 
        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(priv, handle, 1);
@@ -791,9 +808,11 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
                return -EINVAL;
 
        mutex_lock(&bo->mutex);
-       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-       if (ret)
-               goto out;
+       if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
+               ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+               if (ret)
+                       goto out;
+       }
 
        /*
         * If this returns true, we are currently unmapped.
@@ -979,7 +998,11 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
         * Check whether we need to move buffer.
         */
 
-       if (flag_diff & DRM_BO_MASK_MEM) {
+       if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
+               if (bo->type == drm_bo_type_user) {
+                       DRM_ERROR("User buffers are not implemented yet.\n");
+                       return -EINVAL;
+               }
                ret = drm_bo_move_buffer(bo, new_flags, no_wait);
                if (ret)
                        return ret;
@@ -1151,7 +1174,7 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo,
                bo->ttm_object = to;
                ttm = drm_ttm_from_object(to);
                ret = drm_create_ttm_region(ttm, bo->buffer_start >> PAGE_SHIFT,
-                                           bo->num_pages,1,
+                                           bo->num_pages, 0,
                                            
                                            /*                                      bo->mask & DRM_BO_FLAG_BIND_CACHED,*/
                                            &bo->ttm_region);
@@ -1177,9 +1200,11 @@ int drm_buffer_object_create(drm_file_t * priv,
        int ret = 0;
        uint32_t new_flags;
        unsigned long num_pages;
-
+       
        drm_bo_delayed_delete(dev);
-       if (buffer_start & ~PAGE_MASK) {
+
+       if ((buffer_start & ~PAGE_MASK) &&
+           (type != drm_bo_type_fake)) {
                DRM_ERROR("Invalid buffer object start.\n");
                return -EINVAL;
        }
@@ -1206,24 +1231,24 @@ int drm_buffer_object_create(drm_file_t * priv,
        bo->dev = dev;
        bo->type = type;
        bo->num_pages = num_pages;
-       bo->buffer_start = buffer_start;
+       if (bo->type == drm_bo_type_fake) {
+               bo->offset = buffer_start;
+               bo->buffer_start = 0;
+       } else {
+               bo->buffer_start = buffer_start;
+       }
        bo->priv_flags = 0;
        bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
        ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
                               1, &new_flags, &bo->mask);
-       DRM_ERROR("New flags: 0x%08x\n", new_flags);
        if (ret)
                goto out_err;
        ret = drm_bo_add_ttm(priv, bo, ttm_handle);
        if (ret)
                goto out_err;
 
-#if 1
        ret = drm_buffer_object_validate(bo, new_flags, 0,
                                         hint & DRM_BO_HINT_DONT_BLOCK);
-#else
-       bo->flags = new_flags;
-#endif
        if (ret)
                goto out_err;
 
@@ -1268,7 +1293,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
 {
        DRM_DEVICE;
        drm_bo_arg_t arg;
-       drm_bo_arg_request_t *req = &arg.req;
+       drm_bo_arg_request_t *req = &arg.d.req;
        drm_bo_arg_reply_t rep;
        unsigned long next;
        drm_user_object_t *uo;
@@ -1321,8 +1346,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
                case drm_bo_map:
                        rep.ret = drm_buffer_object_map(priv, req->handle,
                                                        req->mask,
-                                                       req->hint &
-                                                       DRM_BO_HINT_DONT_BLOCK,
+                                                       req->hint,
                                                        &rep);
                        break;
                case drm_bo_destroy:
@@ -1394,10 +1418,9 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
                        return -EAGAIN;
 
                arg.handled = 1;
-               arg.rep = rep;
+               arg.d.rep = rep;
                DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
                data = next;
-
        } while (data);
        return 0;
 }
@@ -1409,17 +1432,22 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
 static void drm_bo_force_clean(drm_device_t * dev)
 {
        drm_buffer_manager_t *bm = &dev->bm;
-
-       drm_buffer_object_t *entry, *next;
+       struct list_head *l;
+       drm_buffer_object_t *entry;
        int nice_mode = 1;
        int ret = 0;
 
-       list_for_each_entry_safe(entry, next, &bm->ddestroy, ddestroy) {
+       l = bm->ddestroy.next;
+       while(l != &bm->ddestroy) {
+               entry = list_entry(l, drm_buffer_object_t, ddestroy);
+               list_del(l);
                if (entry->fence) {
                        if (nice_mode) {
                                unsigned long _end = jiffies + 3 * DRM_HZ;
                                do {
+                                       mutex_unlock(&dev->struct_mutex);
                                        ret = drm_bo_wait(entry, 0, 1, 0);
+                                       mutex_lock(&dev->struct_mutex);
                                } while ((ret == -EINTR) &&
                                         !time_after_eq(jiffies, _end));
                        } else {
@@ -1436,8 +1464,8 @@ static void drm_bo_force_clean(drm_device_t * dev)
 
                }
                DRM_DEBUG("Destroying delayed buffer object\n");
-               list_del(&entry->ddestroy);
                drm_bo_destroy_locked(dev, entry);
+               l = bm->ddestroy.next;
        }
 }
 
@@ -1541,11 +1569,9 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
                INIT_LIST_HEAD(&bm->ddestroy);
                INIT_LIST_HEAD(&bm->other);
 
-               init_timer(&bm->timer);
-               bm->timer.function = &drm_bo_delayed_timer;
-               bm->timer.data = (unsigned long)dev;
-
+               INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
                bm->initialized = 1;
+
                break;
        case mm_takedown:
                if (drm_bo_clean_mm(dev)) {
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 86bae30..162e465 100644
@@ -59,9 +59,14 @@ static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd,
        do {
                if (pte_present(*pte)) {
                        pte_t ptent;
-                       ptent = *pte;
                        ptep_get_and_clear(mm, addr, pte);
+                       ptent = *pte;
                        lazy_mmu_prot_update(ptent);
+               } else {
+                       ptep_get_and_clear(mm, addr, pte);
+               }
+               if (!pte_none(*pte)) {
+                 DRM_ERROR("Ugh. Pte was presen\n");
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap(pte - 1);
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index fd43d8b..eaaf7f4 100644
@@ -111,6 +111,10 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
                relevant = type & fence->type;
                if ((fence->signaled | relevant) != fence->signaled) {
                        fence->signaled |= relevant;
+#ifdef BODEBUG
+                       DRM_ERROR("Fence 0x%08lx signaled 0x%08x\n",
+                                 fence->base.hash.key, fence->signaled);
+#endif
                        fence->submitted_flush |= relevant;
                        wake = 1;
                }
@@ -130,6 +134,10 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
                 */
 
                if (!(fence->type & ~fence->signaled)) {
+#ifdef BODEBUG
+                       DRM_ERROR("Fence completely signaled 0x%08lx\n",
+                                 fence->base.hash.key);
+#endif
                        fence_list = &fence->ring;
                        for (i = 0; i < driver->no_types; ++i) {
                                if (fm->fence_types[i] == fence_list)
@@ -172,6 +180,10 @@ void drm_fence_usage_deref_locked(drm_device_t * dev,
 {
        if (atomic_dec_and_test(&fence->usage)) {
                drm_fence_unring(dev, &fence->ring);
+#ifdef BODEBUG
+               DRM_ERROR("Destroyed a fence object 0x%08lx\n",
+                         fence->base.hash.key);
+#endif
                kmem_cache_free(drm_cache.fence_object, fence);
        }
 }
@@ -430,6 +442,9 @@ int drm_fence_add_user_object(drm_file_t *priv, drm_fence_object_t *fence,
                return ret;
        fence->base.type = drm_fence_type;
        fence->base.remove = &drm_fence_object_destroy;
+#ifdef BODEBUG
+       DRM_ERROR("Fence 0x%08lx created\n", fence->base.hash.key);
+#endif
        return 0;
 }
 EXPORT_SYMBOL(drm_fence_add_user_object);
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 26133f9..a83d640 100644
@@ -43,6 +43,38 @@ typedef struct drm_val_action {
 } drm_val_action_t;
 
 /*
+ * Use kmalloc if possible. Otherwise fall back to vmalloc.
+ */
+
+
+static void *ttm_alloc(unsigned long size, int type, int *do_vmalloc)
+{
+       void *ret = NULL;
+
+       *do_vmalloc = 0;
+       if (size <= 4*PAGE_SIZE) {
+               ret = drm_alloc(size, type);
+       }
+       if (!ret) {
+               *do_vmalloc = 1;
+               ret = vmalloc(size);
+       }
+       return ret;
+}
+               
+static void ttm_free(void *pointer, unsigned long size, int type, 
+                    int do_vfree)
+{
+       if (!do_vfree) {
+               drm_free(pointer, size, type);
+       }else {
+               vfree(pointer);
+       }
+}
+
+
+
+/*
  * We may be manipulating other processes page tables, so for each TTM, keep track of 
  * which mm_structs are currently mapping the ttm so that we can take the appropriate
  * locks when we modify their page tables. A typical application is when we evict another
@@ -161,6 +193,7 @@ static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
        list_for_each(list, &ttm->vma_list->head) {
                drm_ttm_vma_list_t *entry =
                    list_entry(list, drm_ttm_vma_list_t, head);
+
                drm_clear_vma(entry->vma,
                              entry->vma->vm_start +
                              (page_offset << PAGE_SHIFT),
@@ -205,7 +238,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
                return -EBUSY;
        }
 
-       DRM_ERROR("Destroying a ttm\n");
+       DRM_DEBUG("Destroying a ttm\n");
        if (ttm->be_list) {
                list_for_each_safe(list, next, &ttm->be_list->head) {
                        drm_ttm_backend_list_t *entry =
@@ -231,12 +264,13 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
                        }
                }
                global_flush_tlb();
-               vfree(ttm->pages);
+               ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
+                        DRM_MEM_TTM, ttm->pages_vmalloc);
                ttm->pages = NULL;
        }
 
        if (ttm->page_flags) {
-               vfree(ttm->page_flags);
+               ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags), DRM_MEM_TTM, ttm->pf_vmalloc);
                ttm->page_flags = NULL;
        }
 
@@ -280,7 +314,8 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
        ttm->destroy = 0;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-       ttm->page_flags = vmalloc(ttm->num_pages * sizeof(*ttm->page_flags));
+       ttm->page_flags = ttm_alloc(ttm->num_pages * sizeof(*ttm->page_flags),
+                                   DRM_MEM_TTM, &ttm->pf_vmalloc);
        if (!ttm->page_flags) {
                drm_destroy_ttm(ttm);
                DRM_ERROR("Failed allocating page_flags table\n");
@@ -288,7 +323,8 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
        }
        memset(ttm->page_flags, 0, ttm->num_pages * sizeof(*ttm->page_flags));
 
-       ttm->pages = vmalloc(ttm->num_pages * sizeof(*ttm->pages));
+       ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages), 
+                              DRM_MEM_TTM, &ttm->pages_vmalloc);
        if (!ttm->pages) {
                drm_destroy_ttm(ttm);
                DRM_ERROR("Failed allocating page table\n");
@@ -483,12 +519,13 @@ void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
        uint32_t *cur_page_flags;
        int i;
 
-       DRM_ERROR("Destroying a TTM region\n");
+       DRM_DEBUG("Destroying a TTM region\n");
        list_del_init(&entry->head);
 
        drm_unbind_ttm_region(entry);
        if (be) {
                be->clear(entry->be);
+#if 0 /* Hmm, Isn't this done in unbind? */
                if (be->needs_cache_adjust(be)) {
                        int ret = drm_ttm_lock_mmap_sem(ttm);
                        drm_ttm_lock_mm(ttm, 0, 1);
@@ -500,6 +537,7 @@ void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
                        if (!ret)
                                drm_ttm_unlock_mm(ttm, 1, 0);
                }
+#endif
                be->destroy(be);
        }
        cur_page_flags = ttm->page_flags + entry->page_offset;
@@ -609,6 +647,12 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
                ret = drm_ttm_lock_mmap_sem(ttm);
                if (ret)
                        return ret;
+
+               drm_ttm_lock_mm(ttm, 0, 1);
+               unmap_vma_pages(ttm, region->page_offset,
+                               region->num_pages);
+               drm_ttm_unlock_mm(ttm, 0, 1);
+
                drm_set_caching(ttm, region->page_offset, region->num_pages,
                                DRM_TTM_PAGE_UNCACHED, 1);
        } else {
@@ -676,7 +720,9 @@ void drm_user_destroy_region(drm_ttm_backend_list_t * entry)
                        page_cache_release(*cur_page);
                        cur_page++;
                }
-               vfree(entry->anon_pages);
+               ttm_free(entry->anon_pages, 
+                        sizeof(*entry->anon_pages)*entry->anon_locked,
+                        DRM_MEM_TTM, entry->pages_vmalloc);
        }
 
        be->destroy(be);
@@ -721,7 +767,8 @@ int drm_user_create_region(drm_device_t * dev, unsigned long start, int len,
                return -EFAULT;
        }
 
-       tmp->anon_pages = vmalloc(sizeof(*(tmp->anon_pages)) * len);
+       tmp->anon_pages = ttm_alloc(sizeof(*(tmp->anon_pages)) * len,
+                                   DRM_MEM_TTM, &tmp->pages_vmalloc);
 
        if (!tmp->anon_pages) {
                drm_user_destroy_region(tmp);
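
drm_ttm.c above gains ttm_alloc()/ttm_free(), which try the kmalloc-backed drm_alloc() for small page tables and fall back to vmalloc(), recording the choice in the new pages_vmalloc/pf_vmalloc flags so the matching free routine is called later. A userspace analogue of that pattern, with hypothetical names and ordinary C allocators (not DRM code):

#include <stdlib.h>

static char small_pool[4 * 4096];          /* stand-in for the "fits in kmalloc" case */
static int small_pool_busy;

static void *table_alloc(size_t size, int *used_fallback)
{
    void *p = NULL;

    *used_fallback = 0;
    if (size <= sizeof(small_pool) && !small_pool_busy) {
        small_pool_busy = 1;
        p = small_pool;                    /* cheap, size-limited allocator */
    }
    if (!p) {
        *used_fallback = 1;
        p = malloc(size);                  /* fallback, like vmalloc() */
    }
    return p;
}

static void table_free(void *p, int used_fallback)
{
    if (used_fallback)
        free(p);                           /* must match the allocator actually used */
    else
        small_pool_busy = 0;
}

int main(void)
{
    int fallback;
    void *p = table_alloc(1024, &fallback);

    table_free(p, fallback);
    return 0;
}

The flag travels with the allocation for the same reason the kernel code needs it: the two release paths (drm_free() versus vfree() there, pool release versus free() here) are not interchangeable.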
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index d647578..5c65e74 100644
@@ -70,6 +70,7 @@ typedef struct drm_ttm_backend_list {
        drm_file_t *anon_owner;
        struct page **anon_pages;
        int anon_locked;
+        int pages_vmalloc;
        enum {
                ttm_bound,
                ttm_evicted,
@@ -99,6 +100,8 @@ typedef struct drm_ttm {
        atomic_t vma_count;
        int mmap_sem_locked;
        int destroy;
+        int pages_vmalloc;
+        int pf_vmalloc;
 } drm_ttm_t;
 
 typedef struct drm_ttm_object {
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 80ef3ab..20e12d6 100644
@@ -55,13 +55,18 @@ static void i915_perform_flush(drm_device_t * dev)
                diff = sequence - fm->last_exe_flush;
                if (diff < driver->wrap_diff && diff != 0) {
                        drm_fence_handler(dev, sequence, DRM_FENCE_EXE);
-                       diff = sequence - fm->exe_flush_sequence;
-                       if (diff < driver->wrap_diff) {
-                               fm->pending_exe_flush = 0;
+               } 
+
+               diff = sequence - fm->exe_flush_sequence;
+               if (diff < driver->wrap_diff) {
+                       fm->pending_exe_flush = 0;
+                       if (dev_priv->fence_irq_on) {
                                i915_user_irq_off(dev_priv);
-                       } else {
-                               i915_user_irq_on(dev_priv);
+                               dev_priv->fence_irq_on = 0;
                        }
+               } else if (!dev_priv->fence_irq_on) {
+                       i915_user_irq_on(dev_priv);
+                       dev_priv->fence_irq_on = 1;
                }
        }
        if (dev_priv->flush_pending) {
@@ -82,8 +87,6 @@ static void i915_perform_flush(drm_device_t * dev)
                dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
                dev_priv->flush_flags = fm->pending_flush;
                dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
-               DRM_ERROR("Saved flush status is 0x%08x\n",
-                         dev_priv->saved_flush_status);
                I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
                dev_priv->flush_pending = 1;
                fm->pending_flush = 0;
diff --git a/shared-core/drm.h b/shared-core/drm.h
index e39f888..bed5517 100644
@@ -728,7 +728,7 @@ typedef struct drm_ttm_arg {
 /* Don't place this buffer on the unfenced list.*/
 #define DRM_BO_HINT_DONT_FENCE  0x00000004
 #define DRM_BO_HINT_WAIT_LAZY   0x00000008
-
+#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010
 
 
 /* Driver specific flags. Could be for example rendering engine */  
@@ -792,7 +792,7 @@ typedef struct drm_bo_arg{
        union {
                drm_bo_arg_request_t req;
                drm_bo_arg_reply_t rep;
-       };
+       } d;
 } drm_bo_arg_t;
 
 typedef union drm_mm_init_arg{
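
The drm_bo_arg_t hunk above names the formerly anonymous request/reply union "d", which is why every caller in libdrm and the kernel changes from arg.req to arg.d.req. A toy, compilable sketch of the resulting access pattern, using stand-in types rather than the real DRM headers (one likely motivation is that anonymous union members are only a compiler extension in pre-C11 C):

#include <stdio.h>

struct request { int op; };
struct reply   { int ret; };

struct bo_arg {
    int handled;
    union {
        struct request req;
        struct reply   rep;
    } d;                               /* named union member, as drm_bo_arg_t now has */
};

int main(void)
{
    struct bo_arg arg = {0};
    struct request *req = &arg.d.req;  /* was &arg.req before the union was named */
    struct reply *rep = &arg.d.rep;

    (void) rep;                        /* reply aliases the same storage; read after the ioctl */
    req->op = 1;
    arg.handled = 1;
    printf("op=%d handled=%d\n", arg.d.req.op, arg.handled);
    return 0;
}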
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index f8d0874..2aa3b94 100644
@@ -105,6 +105,7 @@ typedef struct drm_i915_private {
        int vblank_pipe;
         spinlock_t user_irq_lock;
         int user_irq_refcount;
+        int fence_irq_on;
         uint32_t irq_enable_reg;
         int irq_enabled;
 
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index a3f6a03..4a1b298 100644
@@ -106,7 +106,7 @@ void i915_user_irq_on(drm_i915_private_t *dev_priv)
 {
 
        spin_lock(&dev_priv->user_irq_lock);
-       if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount > 0)){
+       if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
                dev_priv->irq_enable_reg |= USER_INT_FLAG;
                I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
        }
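
The i915_irq.c hunk changes the enable test from "++refcount > 0" (true for every caller, so the register was rewritten on each call) to "++refcount == 1", so the hardware bit is only written when the count goes from 0 to 1. A toy model of that reference-counted enable, with the spinlock and irq_enabled check omitted, a made-up register value, and a mirrored off path sketched for symmetry (the real i915_user_irq_off() is not part of this diff):

#include <stdio.h>

#define USER_INT_FLAG 0x2                  /* illustrative value only */

static int user_irq_refcount;
static unsigned int irq_enable_reg;

static void user_irq_on(void)
{
    if (++user_irq_refcount == 1) {        /* first user: actually enable */
        irq_enable_reg |= USER_INT_FLAG;
        printf("write INT_ENABLE_R = 0x%x\n", irq_enable_reg);
    }
}

static void user_irq_off(void)
{
    if (--user_irq_refcount == 0) {        /* last user: actually disable */
        irq_enable_reg &= ~USER_INT_FLAG;
        printf("write INT_ENABLE_R = 0x%x\n", irq_enable_reg);
    }
}

int main(void)
{
    user_irq_on();     /* writes the register */
    user_irq_on();     /* refcount bump only */
    user_irq_off();    /* refcount drop only */
    user_irq_off();    /* writes the register again */
    return 0;
}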