Merge branch 'master' into modesetting-101
author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Thu, 25 Oct 2007 09:00:45 +0000 (11:00 +0200)
committer Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Thu, 25 Oct 2007 09:00:45 +0000 (11:00 +0200)
Conflicts:

linux-core/Makefile.kernel
linux-core/drm_bo.c
linux-core/drm_objects.h

18 files changed:
libdrm/xf86drm.c
libdrm/xf86mm.h
linux-core/Makefile
linux-core/Makefile.kernel
linux-core/drm_bo.c
linux-core/drm_bo_lock.c [new file with mode: 0644]
linux-core/drm_compat.c
linux-core/drm_drv.c
linux-core/drm_objects.h
linux-core/drm_stub.c
linux-core/drm_vm.c
linux-core/i915_buffer.c
linux-core/intel_fb.c
linux-core/sis_mm.c
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/i915_init.c

diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index bd92ed2..2f9d5c8 100644
@@ -2695,62 +2695,37 @@ int drmBOUnmap(int fd, drmBO *buf)
     return 0;
 }
 
-int drmBOValidate(int fd, drmBO *buf, uint32_t fence_class,
-                 uint64_t flags, uint64_t mask,
-                 unsigned hint)
+int drmBOSetStatus(int fd, drmBO *buf, 
+                  uint64_t flags, uint64_t mask,
+                  unsigned int hint, 
+                  unsigned int desired_tile_stride,
+                  unsigned int tile_info)
 {
-    struct drm_bo_op_arg arg;
-    struct drm_bo_op_req *req = &arg.d.req;
-    struct drm_bo_arg_rep *rep = &arg.d.rep;
+
+    struct drm_bo_map_wait_idle_arg arg;
+    struct drm_bo_info_req *req = &arg.d.req;
+    struct drm_bo_info_rep *rep = &arg.d.rep;
     int ret = 0;
 
     memset(&arg, 0, sizeof(arg));
-    req->bo_req.handle = buf->handle;
-    req->bo_req.flags = flags;
-    req->bo_req.mask = mask;
-    req->bo_req.hint = hint;
-    req->bo_req.fence_class = fence_class;
-    req->op = drm_bo_validate;
-
-    do{
-       ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg);
+    req->mask = mask;
+    req->flags = flags;
+    req->handle = buf->handle;
+    req->hint = hint;
+    req->desired_tile_stride = desired_tile_stride;
+    req->tile_info = tile_info;
+    
+    do {
+           ret = ioctl(fd, DRM_IOCTL_BO_SETSTATUS, &arg);
     } while (ret && errno == EAGAIN);
 
     if (ret) 
-       return -errno;
-    if (!arg.handled)
-       return -EFAULT;
-    if (rep->ret)
-       return rep->ret;
+           return -errno;
 
-    drmBOCopyReply(&rep->bo_info, buf);
-    return 0;
+    drmBOCopyReply(rep, buf);
+    return 0;
 }
            
 
-int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
-{
-    struct drm_bo_op_arg arg;
-    struct drm_bo_op_req *req = &arg.d.req;
-    struct drm_bo_arg_rep *rep = &arg.d.rep;
-    int ret = 0;
-
-    memset(&arg, 0, sizeof(arg));
-    req->bo_req.handle = buf->handle;
-    req->bo_req.flags = flags;
-    req->arg_handle = fenceHandle;
-    req->op = drm_bo_fence;
-
-    ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg);
-    if (ret) 
-       return -errno;
-    if (!arg.handled)
-       return -EFAULT;
-    if (rep->ret)
-       return rep->ret;
-    return 0;
-}
-
 int drmBOInfo(int fd, drmBO *buf)
 {
     struct drm_bo_reference_info_arg arg;
@@ -2793,30 +2768,7 @@ int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
     }
     return 0;
 }
-
-int drmBOSetPin(int fd, drmBO *buf, int pin)
-{
-    struct drm_bo_set_pin_arg arg;
-    struct drm_bo_set_pin_req *req = &arg.d.req;
-    struct drm_bo_info_rep *rep = &arg.d.rep;
-    int ret = 0;
-
-    memset(&arg, 0, sizeof(arg));
-    req->handle = buf->handle;
-    req->pin = pin;
-
-    do {
-       ret = ioctl(fd, DRM_IOCTL_BO_SET_PIN, &arg);
-    } while (ret && errno == EAGAIN);
-
-    if (ret)
-       return -errno;
-
-    drmBOCopyReply(rep, buf);
-
-    return 0;
-}
-
+       
 int drmBOBusy(int fd, drmBO *buf, int *busy)
 {
     if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) &&
@@ -2864,13 +2816,20 @@ int drmMMTakedown(int fd, unsigned memType)
     return 0;  
 }
 
-int drmMMLock(int fd, unsigned memType)
+/*
+ * If this function returns an error, and lockBM was set to 1,
+ * the buffer manager is NOT locked.
+ */
+
+int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict)
 {
     struct drm_mm_type_arg arg;
     int ret;
 
     memset(&arg, 0, sizeof(arg));
     arg.mem_type = memType;
+    arg.lock_flags |= (lockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;
+    arg.lock_flags |= (ignoreNoEvict) ? DRM_BO_LOCK_IGNORE_NO_EVICT : 0;
 
     do{
         ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg);
@@ -2879,7 +2838,7 @@ int drmMMLock(int fd, unsigned memType)
     return (ret) ? -errno : 0;
 }
 
-int drmMMUnlock(int fd, unsigned memType)
+int drmMMUnlock(int fd, unsigned memType, int unlockBM)
 {
     struct drm_mm_type_arg arg;
     int ret;
@@ -2887,6 +2846,7 @@ int drmMMUnlock(int fd, unsigned memType)
     memset(&arg, 0, sizeof(arg));
     
     arg.mem_type = memType;
+    arg.lock_flags |= (unlockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;
 
     do{
        ret = ioctl(fd, DRM_IOCTL_MM_UNLOCK, &arg);
@@ -2895,6 +2855,30 @@ int drmMMUnlock(int fd, unsigned memType)
     return (ret) ? -errno : 0;
 }
 
+int drmBOVersion(int fd, unsigned int *major,
+                unsigned int *minor,
+                unsigned int *patchlevel)
+{
+    struct drm_bo_version_arg arg;
+    int ret;
+
+    memset(&arg, 0, sizeof(arg));
+    ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
+    if (ret)
+       return ret;
+
+    if (major)
+       *major = arg.major;
+    if (minor)
+       *minor = arg.minor;
+    if (patchlevel)
+       *patchlevel = arg.patchlevel;
+
+    return (ret) ? -errno : 0;
+}
+
+
+
 #define DRM_MAX_FDS 16
 static struct {
     char *BusID;
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index f817d81..49ae2c0 100644
@@ -164,7 +164,6 @@ extern int drmBOInfo(int fd, drmBO *buf);
 extern int drmBOBusy(int fd, drmBO *buf, int *busy);
 
 extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint);
-int drmBOSetPin(int fd, drmBO *buf, int pin);
 
 /*
  * Initialization functions.
@@ -173,8 +172,16 @@ int drmBOSetPin(int fd, drmBO *buf, int pin);
 extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
                     unsigned memType);
 extern int drmMMTakedown(int fd, unsigned memType);
-extern int drmMMLock(int fd, unsigned memType);
-extern int drmMMUnlock(int fd, unsigned memType);
+extern int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict);
+extern int drmMMUnlock(int fd, unsigned memType, int unlockBM);
+extern int drmBOSetStatus(int fd, drmBO *buf, 
+                         uint64_t flags, uint64_t mask,
+                         unsigned int hint, 
+                         unsigned int desired_tile_stride,
+                         unsigned int tile_info);
+extern int drmBOVersion(int fd, unsigned int *major,
+                       unsigned int *minor,
+                       unsigned int *patchlevel);
 
 
 #endif
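
The prototypes above replace drmBOValidate/drmBOFence/drmBOSetPin with drmBOSetStatus, extend drmMMLock/drmMMUnlock, and add drmBOVersion for interface negotiation. Below is a minimal usage sketch of the new entry points; it is not part of this change, and the helper names (pin_fb, park_mm) as well as the DRM_BO_MEM_TT / DRM_BO_FLAG_MEM_VRAM choices are illustrative assumptions.

/*
 * Usage sketch only -- not part of this change.  Assumes an open DRM fd
 * and an already created drmBO; error handling is abbreviated and the
 * helper names are hypothetical.
 */
#include "xf86drm.h"
#include "xf86mm.h"

static int pin_fb(int fd, drmBO *bo)
{
    unsigned int major, minor, patch;

    if (drmBOVersion(fd, &major, &minor, &patch) || major != 1)
        return -1;          /* kernel exposes a different BO interface major */

    /* Request VRAM placement and pin it there (NO_EVICT is root-only). */
    return drmBOSetStatus(fd, bo,
                          DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
                          DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
                          0 /* hint */, 0 /* tile stride */, 0 /* tile info */);
}

static int park_mm(int fd)
{
    /* Block further TT allocations, e.g. around a VT switch; lockBM = 1
     * also takes the buffer-manager write lock on the kernel side. */
    if (drmMMLock(fd, DRM_BO_MEM_TT, 1, 0))
        return -1;
    /* ... VT switched away; later, on VT enter ... */
    return drmMMUnlock(fd, DRM_BO_MEM_TT, 1);
}
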
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 6eb5bf5..7f6b123 100644
@@ -269,7 +269,6 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
 ifneq ($(PAGE_AGP),0)
 EXTRA_CFLAGS += -DHAVE_PAGE_AGP
 endif
-EXTRA_CFLAGS += -g
 
 # Start with all modules turned off.
 CONFIG_DRM_GAMMA := n
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 6cbe3a2..457e3f3 100644
@@ -14,7 +14,7 @@ drm-objs    := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
                drm_memory_debug.o ati_pcigart.o drm_sman.o \
                drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
                drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_crtc.o \
-               drm_edid.o drm_modes.o
+               drm_edid.o drm_modes.o drm_bo_lock.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 217a33b..b6a972e 100644
@@ -80,7 +80,8 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo)
 
        DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
 
-       if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) {
+       if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
+           || bo->mem.mem_type != bo->pinned_mem_type) {
                man = &bo->dev->bm.man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
        } else {
@@ -637,8 +638,7 @@ int drm_fence_buffer_objects(struct drm_device *dev,
                mutex_lock(&entry->mutex);
                mutex_lock(&dev->struct_mutex);
                list_del_init(l);
-               if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED &&
-                   entry->fence_class == fence_class) {
+               if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
                        count++;
                        if (entry->fence)
                                drm_fence_usage_deref_locked(&entry->fence);
@@ -760,7 +760,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev,
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
-               BUG_ON(entry->pinned);
+               BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
 
                ret = drm_bo_evict(entry, mem_type, no_wait);
                mutex_unlock(&entry->mutex);
@@ -920,24 +920,37 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
 EXPORT_SYMBOL(drm_bo_mem_space);
 
 static int drm_bo_new_mask(struct drm_buffer_object * bo,
-                          uint64_t new_mask, uint32_t hint)
+                          uint64_t new_flags, uint64_t used_mask)
 {
        uint32_t new_props;
 
        if (bo->type == drm_bo_type_user) {
-               DRM_ERROR("User buffers are not supported yet\n");
+               DRM_ERROR("User buffers are not supported yet.\n");
                return -EINVAL;
        }
 
-       new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
-                               DRM_BO_FLAG_READ);
+       if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+               DRM_ERROR
+                   ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
+                    "processes.\n");
+               return -EPERM;
+       }
+
+       if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
+               DRM_ERROR
+                       ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
+               return -EPERM;
+       }
+
+       new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
+                                DRM_BO_FLAG_READ);
 
        if (!new_props) {
                DRM_ERROR("Invalid buffer object rwx properties\n");
                return -EINVAL;
        }
 
-       bo->mem.mask = new_mask;
+       bo->mem.mask = new_flags;
        return 0;
 }
 
@@ -1058,13 +1071,6 @@ static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
 /*
  * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
  * Until then, we cannot really do anything with it except delete it.
- * The unfenced list is a PITA, and the operations
- * 1) validating
- * 2) submitting commands
- * 3) fencing
- * Should really be an atomic operation.
- * We now "solve" this problem by keeping
- * the buffer "unfenced" after validating, but before fencing.
  */
 
 static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
@@ -1147,11 +1153,9 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
                return -EINVAL;
 
        mutex_lock(&bo->mutex);
-       if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
-               ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-               if (ret)
-                       goto out;
-       }
+       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+       if (ret)
+               goto out;
 
        /*
         * If this returns true, we are currently unmapped.
@@ -1294,10 +1298,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
 
        mutex_lock(&bm->evict_mutex);
        mutex_lock(&dev->struct_mutex);
-       list_del(&bo->lru);
-       list_add_tail(&bo->lru, &bm->unfenced);
-       DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
-                       _DRM_BO_FLAG_UNFENCED);
+       list_del_init(&bo->lru);
        mutex_unlock(&dev->struct_mutex);
 
        /*
@@ -1317,10 +1318,6 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
                                drm_mm_put_block(mem.mm_node);
                        mem.mm_node = NULL;
                }
-               DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-               DRM_WAKEUP(&bo->event_queue);
-               list_del(&bo->lru);
-               drm_bo_add_to_lru(bo);
                mutex_unlock(&dev->struct_mutex);
        }
 
@@ -1371,12 +1368,6 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
                return ret;
        }
 
-       if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) {
-               DRM_ERROR("Attempt to validate pinned buffer into different memory "
-                   "type\n");
-               return -EINVAL;
-       }
-
        /*
         * We're switching command submission mechanism,
         * or cannot simply rely on the hardware serializing for us.
@@ -1418,6 +1409,37 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
        }
 
        /*
+        * Pinned buffers.
+        */
+
+       if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+               bo->pinned_mem_type = bo->mem.mem_type;
+               mutex_lock(&dev->struct_mutex);
+               list_del_init(&bo->pinned_lru);
+               drm_bo_add_to_pinned_lru(bo);
+
+               if (bo->pinned_node != bo->mem.mm_node) {
+                       if (bo->pinned_node != NULL)
+                               drm_mm_put_block(bo->pinned_node);
+                       bo->pinned_node = bo->mem.mm_node;
+               }
+
+               mutex_unlock(&dev->struct_mutex);
+
+       } else if (bo->pinned_node != NULL) {
+
+               mutex_lock(&dev->struct_mutex);
+
+               if (bo->pinned_node != bo->mem.mm_node)
+                       drm_mm_put_block(bo->pinned_node);
+
+               list_del_init(&bo->pinned_lru);
+               bo->pinned_node = NULL;
+               mutex_unlock(&dev->struct_mutex);
+
+       }
+
+       /*
         * We might need to add a TTM.
         */
 
@@ -1467,7 +1489,7 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
 
 
        DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
-       ret = drm_bo_new_mask(bo, flags, hint);
+       ret = drm_bo_new_mask(bo, flags, mask);
        if (ret)
                goto out;
 
@@ -1487,7 +1509,9 @@ EXPORT_SYMBOL(drm_bo_do_validate);
 
 int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
                           uint32_t fence_class,
-                          uint64_t flags, uint64_t mask, uint32_t hint,
+                          uint64_t flags, uint64_t mask, 
+                          uint32_t hint,
+                          int use_old_fence_class,
                           struct drm_bo_info_rep * rep,
                           struct drm_buffer_object **bo_rep)
 {
@@ -1500,10 +1524,20 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
        bo = drm_lookup_buffer_object(file_priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);
 
-       if (!bo) {
+       if (!bo) 
                return -EINVAL;
-       }
 
+       if (use_old_fence_class)
+               fence_class = bo->fence_class;
+
+       /*
+        * Only allow creator to change shared buffer mask.
+        */
+
+       if (bo->base.owner != file_priv) 
+               mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
+
+               
        ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
                                 no_wait, rep);
 
@@ -1516,10 +1550,6 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
 }
 EXPORT_SYMBOL(drm_bo_handle_validate);
 
-/**
- * Fills out the generic buffer object ioctl reply with the information for
- * the BO with id of handle.
- */
 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
                              struct drm_bo_info_rep *rep)
 {
@@ -1586,7 +1616,6 @@ int drm_buffer_object_create(struct drm_device *dev,
 {
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_buffer_object *bo;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
        int ret = 0;
        unsigned long num_pages;
 
@@ -1630,8 +1659,10 @@ int drm_buffer_object_create(struct drm_device *dev,
        bo->mem.page_alignment = page_alignment;
        bo->buffer_start = buffer_start;
        bo->priv_flags = 0;
-       bo->mem.flags = 0ULL;
-       bo->mem.mask = 0ULL;
+       bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | 
+               DRM_BO_FLAG_MAPPABLE;
+       bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+               DRM_BO_FLAG_MAPPABLE;
        atomic_inc(&bm->count);
        ret = drm_bo_new_mask(bo, mask, hint);
 
@@ -1646,21 +1677,10 @@ int drm_buffer_object_create(struct drm_device *dev,
                        goto out_err;
        }
 
-       bo->fence_class = 0;
-       ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
-       if (ret) {
-               DRM_ERROR("Driver did not support given buffer permissions\n");
-               goto out_err;
-       }
-
-       ret = drm_bo_add_ttm(bo);
+       ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
        if (ret)
                goto out_err;
 
-       mutex_lock(&dev->struct_mutex);
-       drm_bo_add_to_lru(bo);
-       mutex_unlock(&dev->struct_mutex);
-
        mutex_unlock(&bo->mutex);
        *buf_obj = bo;
        return 0;
@@ -1673,6 +1693,7 @@ int drm_buffer_object_create(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_buffer_object_create);
 
+
 static int drm_bo_add_user_object(struct drm_file *file_priv,
                                  struct drm_buffer_object *bo, int shareable)
 {
@@ -1694,88 +1715,6 @@ static int drm_bo_add_user_object(struct drm_file *file_priv,
        return ret;
 }
 
-static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
-{
-       LOCK_TEST_WITH_RETURN(dev, file_priv);
-       return 0;
-}
-
-int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_bo_op_arg curarg;
-       struct drm_bo_op_arg *arg = data;
-       struct drm_bo_op_req *req = &arg->d.req;
-       struct drm_bo_info_rep rep;
-       unsigned long next = 0;
-       void __user *curuserarg = NULL;
-       int ret;
-
-       DRM_DEBUG("drm_bo_op_ioctl\n");
-
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-
-       do {
-               if (next != 0) {
-                       curuserarg = (void __user *)next;
-                       if (copy_from_user(&curarg, curuserarg,
-                                          sizeof(curarg)) != 0)
-                               return -EFAULT;
-                       arg = &curarg;
-               }
-
-               if (arg->handled) {
-                       next = arg->next;
-                       continue;
-               }
-               req = &arg->d.req;
-               ret = 0;
-               switch (req->op) {
-               case drm_bo_validate:
-                       ret = drm_bo_lock_test(dev, file_priv);
-                       if (ret)
-                               break;
-                       ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
-                                                    req->bo_req.fence_class,
-                                                    req->bo_req.flags,
-                                                    req->bo_req.mask,
-                                                    req->bo_req.hint,
-                                                    &rep, NULL);
-                       break;
-               case drm_bo_fence:
-                       ret = -EINVAL;
-                       DRM_ERROR("Function is not implemented yet.\n");
-                       break;
-               case drm_bo_ref_fence:
-                       ret = -EINVAL;
-                       DRM_ERROR("Function is not implemented yet.\n");
-                       break;
-               default:
-                       ret = -EINVAL;
-               }
-               next = arg->next;
-
-               /*
-                * A signal interrupted us. Make sure the ioctl is restartable.
-                */
-
-               if (ret == -EAGAIN)
-                       return -EAGAIN;
-
-               arg->handled = 1;
-               arg->d.rep.ret = ret;
-               arg->d.rep.bo_info = rep;
-               if (arg != data) {
-                       if (copy_to_user(curuserarg, &curarg,
-                                        sizeof(curarg)) != 0)
-                               return -EFAULT;
-               }
-       } while (next != 0);
-       return 0;
-}
-
 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        struct drm_bo_create_arg *arg = data;
@@ -1814,15 +1753,43 @@ out:
        return ret;
 }
 
-int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_bo_setstatus_ioctl(struct drm_device *dev, 
+                          void *data, struct drm_file *file_priv)
 {
        struct drm_bo_map_wait_idle_arg *arg = data;
        struct drm_bo_info_req *req = &arg->d.req;
        struct drm_bo_info_rep *rep = &arg->d.rep;
        int ret;
 
-       DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle);
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_read_lock(&dev->bm.bm_lock);
+       if (ret)
+               return ret;
+
+       ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
+                                    req->flags,
+                                    req->mask,
+                                    req->hint | DRM_BO_HINT_DONT_FENCE,
+                                    1,
+                                    rep, NULL);
 
+       (void) drm_bo_read_unlock(&dev->bm.bm_lock);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_map_wait_idle_arg *arg = data;
+       struct drm_bo_info_req *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       int ret;
        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
@@ -1840,9 +1807,6 @@ int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 {
        struct drm_bo_handle_arg *arg = data;
        int ret;
-
-       DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle);
-
        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
@@ -1861,8 +1825,6 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *
        struct drm_user_object *uo;
        int ret;
 
-       DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle);
-
        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
@@ -1885,8 +1847,6 @@ int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file
        struct drm_bo_handle_arg *arg = data;
        int ret = 0;
 
-       DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle);
-
        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
@@ -1903,8 +1863,6 @@ int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
        struct drm_bo_info_rep *rep = &arg->d.rep;
        int ret;
 
-       DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle);
-
        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
@@ -1923,9 +1881,6 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *
        struct drm_bo_info_req *req = &arg->d.req;
        struct drm_bo_info_rep *rep = &arg->d.rep;
        int ret;
-
-       DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle);
-
        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
@@ -1939,177 +1894,10 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *
        return 0;
 }
 
-/**
- * Pins or unpins the given buffer object in the given memory area.
- *
- * Pinned buffers will not be evicted from or move within their memory area.
- * Must be called with the hardware lock held for pinning.
- */
-int
-drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo,
-    int pin)
-{
-       int ret = 0;
-
-       mutex_lock(&bo->mutex);
-       if (bo->pinned == pin) {
-               mutex_unlock(&bo->mutex);
-               return 0;
-       }
-
-       if (pin) {
-               ret = drm_bo_wait_unfenced(bo, 0, 0);
-               if (ret) {
-                       mutex_unlock(&bo->mutex);
-                       return ret;
-               }
-
-               /* Validate the buffer into its pinned location, with no
-                * pending fence.
-                */
-               ret = drm_buffer_object_validate(bo, bo->fence_class, 0, 0);
-               if (ret) {
-                       mutex_unlock(&bo->mutex);
-                       return ret;
-               }
-
-               /* Pull the buffer off of the LRU and add it to the pinned
-                * list
-                */
-               bo->pinned_mem_type = bo->mem.mem_type;
-               mutex_lock(&dev->struct_mutex);
-               list_del_init(&bo->lru);
-               list_del_init(&bo->pinned_lru);
-               drm_bo_add_to_pinned_lru(bo);
-
-               if (bo->pinned_node != bo->mem.mm_node) {
-                       if (bo->pinned_node != NULL)
-                               drm_mm_put_block(bo->pinned_node);
-                       bo->pinned_node = bo->mem.mm_node;
-               }
-
-               bo->pinned = pin;
-               mutex_unlock(&dev->struct_mutex);
-
-       } else {
-               mutex_lock(&dev->struct_mutex);
-
-               /* Remove our buffer from the pinned list */
-               if (bo->pinned_node != bo->mem.mm_node)
-                       drm_mm_put_block(bo->pinned_node);
-
-               list_del_init(&bo->pinned_lru);
-               bo->pinned_node = NULL;
-               bo->pinned = pin;
-               mutex_unlock(&dev->struct_mutex);
-       }
-       mutex_unlock(&bo->mutex);
-       return 0;
-}
-EXPORT_SYMBOL(drm_bo_set_pin);
-
-int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       struct drm_bo_set_pin_arg *arg = data;
-       struct drm_bo_set_pin_req *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       struct drm_buffer_object *bo;
-       int ret;
-
-       DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n",
-           req->handle, req->pin);
-
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-
-       if (req->pin < 0 || req->pin > 1) {
-               DRM_ERROR("Bad arguments to set_pin\n");
-               return -EINVAL;
-       }
-
-       if (req->pin)
-               LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-       if (!bo) {
-               return -EINVAL;
-       }
-
-       ret = drm_bo_set_pin(dev, bo, req->pin);
-       if (ret) {
-               drm_bo_usage_deref_unlocked(&bo);
-               return ret;
-       }
-
-       drm_bo_fill_rep_arg(bo, rep);
-       drm_bo_usage_deref_unlocked(&bo);
-
-       return 0;
-}
-
-
-/**
- *Clean the unfenced list and put on regular LRU.
- *This is part of the memory manager cleanup and should only be
- *called with the DRI lock held.
- *Call dev->struct_sem locked.
- */
-
-static void drm_bo_clean_unfenced(struct drm_device *dev)
-{
-       struct drm_buffer_manager *bm  = &dev->bm;
-       struct list_head *head, *list;
-       struct drm_buffer_object *entry;
-       struct drm_fence_object *fence;
-
-       head = &bm->unfenced;
-
-       if (list_empty(head))
-               return;
-
-       DRM_ERROR("Clean unfenced\n");
-
-       if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
-
-               /*
-                * Fixme: Should really wait here.
-                */
-       }
-
-       if (fence)
-               drm_fence_usage_deref_locked(&fence);
-
-       if (list_empty(head))
-               return;
-
-       DRM_ERROR("Really clean unfenced\n");
-
-       list = head->next;
-       while(list != head) {
-               prefetch(list->next);
-               entry = list_entry(list, struct drm_buffer_object, lru);
-
-               atomic_inc(&entry->usage);
-               mutex_unlock(&dev->struct_mutex);
-               mutex_lock(&entry->mutex);
-               mutex_lock(&dev->struct_mutex);
-
-               list_del(&entry->lru);
-               DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-               drm_bo_add_to_lru(entry);
-               mutex_unlock(&entry->mutex);
-               list = head->next;
-       }
-}
-
 static int drm_bo_leave_list(struct drm_buffer_object * bo,
                             uint32_t mem_type,
-                            int free_pinned, int allow_errors)
+                            int free_pinned,
+                            int allow_errors)
 {
        struct drm_device *dev = bo->dev;
        int ret = 0;
@@ -2133,10 +1921,11 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo,
                mutex_unlock(&dev->struct_mutex);
        }
 
-       if (bo->pinned) {
-               DRM_ERROR("A pinned buffer was present at "
+       if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
+               DRM_ERROR("A DRM_BO_NO_EVICT buffer was present at "
                          "cleanup. Removing flag and evicting.\n");
-               bo->pinned = 0;
+               bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
+               bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
        }
 
        if (bo->mem.mem_type == mem_type)
@@ -2256,8 +2045,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
 
        ret = 0;
        if (mem_type > 0) {
-
-               drm_bo_clean_unfenced(dev);
+               BUG_ON(!list_empty(&bm->unfenced));
                drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
                drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
 
@@ -2295,7 +2083,6 @@ static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
                return 0;
        }
 
-       drm_bo_clean_unfenced(dev);
        ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
        if (ret)
                return ret;
@@ -2349,8 +2136,10 @@ int drm_bo_init_mm(struct drm_device * dev,
 EXPORT_SYMBOL(drm_bo_init_mm);
 
 /*
- * This is called from lastclose, so we don't need to bother about
- * any clients still running when we set the initialized flag to zero.
+ * This function is intended to be called on drm driver unload.
+ * If you decide to call it from lastclose, you must protect the call
+ * from a potentially racing drm_bo_driver_init in firstopen. 
+ * (This may happen on X server restart).
  */
 
 int drm_bo_driver_finish(struct drm_device * dev)
@@ -2360,7 +2149,6 @@ int drm_bo_driver_finish(struct drm_device * dev)
        unsigned i = DRM_BO_MEM_TYPES;
        struct drm_mem_type_manager *man;
 
-       mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
 
        if (!bm->initialized)
@@ -2400,18 +2188,24 @@ int drm_bo_driver_finish(struct drm_device * dev)
        }
       out:
        mutex_unlock(&dev->struct_mutex);
-       mutex_unlock(&dev->bm.init_mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_bo_driver_finish);
 
+/*
+ * This function is intended to be called on drm driver load.
+ * If you decide to call it from firstopen, you must protect the call
+ * from a potentially racing drm_bo_driver_finish in lastclose. 
+ * (This may happen on X server restart).
+ */
+
 int drm_bo_driver_init(struct drm_device * dev)
 {
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        struct drm_buffer_manager *bm = &dev->bm;
        int ret = -EINVAL;
 
-       mutex_lock(&dev->bm.init_mutex);
+       drm_bo_init_lock(&bm->bm_lock);
        mutex_lock(&dev->struct_mutex);
        if (!driver)
                goto out_unlock;
@@ -2437,7 +2231,6 @@ int drm_bo_driver_init(struct drm_device * dev)
        INIT_LIST_HEAD(&bm->ddestroy);
       out_unlock:
        mutex_unlock(&dev->struct_mutex);
-       mutex_unlock(&dev->bm.init_mutex);
        return ret;
 }
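
The comments above drm_bo_driver_finish() and drm_bo_driver_init() describe where these calls now belong. A minimal sketch of the intended call sites follows, assuming the usual drm_driver load/unload hooks; the foo_ names are hypothetical and not part of this change.

/* Sketch only -- not part of this change. */
#include "drmP.h"

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
        /* Runs once at driver load, so it cannot race lastclose. */
        return drm_bo_driver_init(dev);
}

static int foo_driver_unload(struct drm_device *dev)
{
        /* Mirror of the above: tear down the buffer-object managers. */
        return drm_bo_driver_finish(dev);
}

/*
 * Wired up through the driver descriptor:
 *      .load   = foo_driver_load,
 *      .unload = foo_driver_unload,
 */
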
 
@@ -2450,14 +2243,15 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        int ret;
 
-       DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n",
-           arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4));
-
        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }
 
+       ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
+       if (ret)
+               return ret;
+
        ret = -EINVAL;
        if (arg->magic != DRM_BO_INIT_MAGIC) {
                DRM_ERROR("You are using an old libdrm that is not compatible with\n"
@@ -2466,19 +2260,11 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
        }
        if (arg->major != DRM_BO_INIT_MAJOR) {
                DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
-                         "\tversion don't match. Got %d, expected %d,\n",
+                         "\tversion doesn't match. Got %d, expected %d.\n",
                          arg->major, DRM_BO_INIT_MAJOR);
                return -EINVAL;
        }
-       if (arg->minor > DRM_BO_INIT_MINOR) {
-               DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
-                         "\tlibdrm buffer object interface version is %d.%d.\n"
-                         "\tkernel DRM buffer object interface version is %d.%d\n",
-                         arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
-               return -EINVAL;
-       }
 
-       mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
        if (!bm->initialized) {
                DRM_ERROR("DRM memory manager was not initialized.\n");
@@ -2493,7 +2279,8 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
 
 out:
        mutex_unlock(&dev->struct_mutex);
-       mutex_unlock(&dev->bm.init_mutex);
+       (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
+
        if (ret)
                return ret;
 
@@ -2507,15 +2294,15 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        int ret;
 
-       DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type);
-
        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }
 
-       LOCK_TEST_WITH_RETURN(dev, file_priv);
-       mutex_lock(&dev->bm.init_mutex);
+       ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
+       if (ret)
+               return ret;
+
        mutex_lock(&dev->struct_mutex);
        ret = -EINVAL;
        if (!bm->initialized) {
@@ -2533,7 +2320,8 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f
        }
 out:
        mutex_unlock(&dev->struct_mutex);
-       mutex_unlock(&dev->bm.init_mutex);
+       (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
+
        if (ret)
                return ret;
 
@@ -2546,47 +2334,52 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        int ret;
 
-       DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type);
-
        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }
 
-       LOCK_TEST_WITH_RETURN(dev, file_priv);
-       mutex_lock(&dev->bm.init_mutex);
+       if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
+               DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
+               return -EINVAL;
+       }
+               
+       if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
+               ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
+               if (ret)
+                       return ret;
+       }
+               
        mutex_lock(&dev->struct_mutex);
        ret = drm_bo_lock_mm(dev, arg->mem_type);
        mutex_unlock(&dev->struct_mutex);
-       mutex_unlock(&dev->bm.init_mutex);
-       if (ret)
+       if (ret) {
+               (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
                return ret;
+       }
 
        return 0;
 }
 
-int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_mm_unlock_ioctl(struct drm_device *dev, 
+                       void *data, 
+                       struct drm_file *file_priv)
 {
+       struct drm_mm_type_arg *arg = data;
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        int ret;
 
-       DRM_DEBUG("drm_mm_unlock_ioctl\n");
-
        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }
 
-       LOCK_TEST_WITH_RETURN(dev, file_priv);
-       mutex_lock(&dev->bm.init_mutex);
-       mutex_lock(&dev->struct_mutex);
-       ret = 0;
-
-       mutex_unlock(&dev->struct_mutex);
-       mutex_unlock(&dev->bm.init_mutex);
-       if (ret)
-               return ret;
-
+       if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
+               ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
+               if (ret)
+                       return ret;
+       }
+               
        return 0;
 }
 
@@ -2735,3 +2528,15 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
 
        return 0;
 }
+
+int drm_bo_version_ioctl(struct drm_device *dev, void *data, 
+                        struct drm_file *file_priv)
+{
+       struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
+       
+       arg->major = DRM_BO_INIT_MAJOR;
+       arg->minor = DRM_BO_INIT_MINOR;
+       arg->patchlevel = DRM_BO_INIT_PATCH;
+
+       return 0;
+}
diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c
new file mode 100644
index 0000000..e5a8682
--- /dev/null
@@ -0,0 +1,178 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+/*
+ * This file implements a simple replacement for the buffer manager use
+ * of the heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode is fast, and 
+ * intended for in-kernel use only.
+ * Taking it in write mode is slow.
+ *
+ * The write mode is used only when there is a need to block all 
+ * user-space processes from allocating a 
+ * new memory area.
+ * Typical use in write mode is X server VT switching, and it's allowed
+ * to leave kernel space with the write lock held. If a user-space process
+ * dies while holding the write lock, it will be released during the file
+ * descriptor release.
+ *
+ * The read lock is typically placed at the start of an IOCTL- or 
+ * user-space callable function that may end up allocating a memory area.
+ * This includes setstatus, super-ioctls and no_pfn; the latter may move
+ * unmappable regions to mappable. It's a bug to leave kernel space with the
+ * read lock held.
+ *
+ * Both read- and write lock taking is interruptible for low signal-delivery
+ * latency. The locking functions will return -EAGAIN if interrupted by a
+ * signal.
+ *
+ * Locking order: The lock should be taken BEFORE any kernel mutexes 
+ * or spinlocks.
+ */
+
+#include "drmP.h"
+
+void drm_bo_init_lock(struct drm_bo_lock *lock)
+{
+       DRM_INIT_WAITQUEUE(&lock->queue);
+       atomic_set(&lock->write_lock_pending, 0);
+       atomic_set(&lock->readers, 0);
+}
+
+void drm_bo_read_unlock(struct drm_bo_lock *lock)
+{
+       if (unlikely(atomic_add_negative(-1, &lock->readers)))
+               BUG();
+       if (atomic_read(&lock->readers) == 0)
+               wake_up_interruptible(&lock->queue);
+}
+
+EXPORT_SYMBOL(drm_bo_read_unlock);
+
+int drm_bo_read_lock(struct drm_bo_lock *lock)
+{
+       while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
+               int ret;
+               ret = wait_event_interruptible
+                   (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
+               if (ret)
+                       return -EAGAIN;
+       }
+
+       while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
+               int ret;
+               ret = wait_event_interruptible
+                   (lock->queue, atomic_add_unless(&lock->readers, 1, -1));
+               if (ret)
+                       return -EAGAIN;
+       }
+       return 0;
+}
+
+EXPORT_SYMBOL(drm_bo_read_lock);
+
+static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
+{
+       if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
+               return -EINVAL;
+       if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1))
+               return -EINVAL;
+       wake_up_interruptible(&lock->queue);
+       return 0;
+}
+
+static void drm_bo_write_lock_remove(struct drm_file *file_priv,
+                                    struct drm_user_object *item)
+{
+       struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
+       int ret;
+
+       ret = __drm_bo_write_unlock(lock);
+       BUG_ON(ret);
+}
+
+int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
+{
+       int ret = 0;
+       struct drm_device *dev;
+
+       if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) {
+               return -EINVAL;
+       }
+
+       while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
+               ret = wait_event_interruptible
+                   (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0);
+
+               if (ret) {
+                       atomic_set(&lock->write_lock_pending, 0);
+                       wake_up_interruptible(&lock->queue);
+                       return -EAGAIN;
+               }
+       }
+
+       /*
+        * Add a dummy user-object, the destructor of which will
+        * make sure the lock is released if the client dies 
+        * while holding it.
+        */
+
+       dev = file_priv->head->dev;
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_add_user_object(file_priv, &lock->base, 0);
+       lock->base.remove = &drm_bo_write_lock_remove;
+       lock->base.type = drm_lock_type;
+       if (ret) {
+               (void)__drm_bo_write_unlock(lock);
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_ref_object *ro;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (lock->base.owner != file_priv) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+       ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
+       BUG_ON(!ro);
+       drm_remove_ref_object(file_priv, ro);
+       lock->base.owner = NULL;
+
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
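
The header comment at the top of this file describes when each mode of the lock should be taken. For reference, here is a minimal sketch of the read-lock pattern for a driver ioctl that may end up allocating or moving buffer memory; the foo_ ioctl is hypothetical, and drm_bo_setstatus_ioctl in drm_bo.c is the in-tree example of the same pattern.

/* Sketch only -- not part of this change. */
int foo_alloc_space_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        /* Interruptible; returns -EAGAIN if a signal arrives while a
         * write lock (e.g. a VT switch) is pending or held. */
        ret = drm_bo_read_lock(&dev->bm.bm_lock);
        if (ret)
                return ret;

        /* ... validate, move or allocate buffer objects here ... */

        /* Never return to user space with the read lock held. */
        drm_bo_read_unlock(&dev->bm.bm_lock);
        return 0;
}
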
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index e51aedb..ae44e50 100644
@@ -212,6 +212,8 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
        unsigned long bus_offset;
        unsigned long bus_size;
        
+       dev = bo->dev;
+       while(drm_bo_read_lock(&dev->bm.bm_lock));
 
        mutex_lock(&bo->mutex);
 
@@ -289,6 +291,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                data->type = VM_FAULT_OOM;
 out_unlock:
        mutex_unlock(&bo->mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
        return NULL;
 }
 
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 0d1a6d1..4a871d6 100644
@@ -136,10 +136,14 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
 
-       DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, 
+                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, 
+                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, 
+                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, 
+                     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
@@ -155,10 +159,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_BO_SET_PIN, drm_bo_set_pin_ioctl, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
 };
 
 #define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 9c9826e..8ae55bf 100644
@@ -43,6 +43,7 @@ struct drm_bo_mem_reg;
 enum drm_object_type {
        drm_fence_type,
        drm_buffer_type,
+       drm_lock_type,
            /*
             * Add other user space object types here.
             */
@@ -377,7 +378,6 @@ struct drm_buffer_object {
        unsigned long num_pages;
 
        /* For pinned buffers */
-       int pinned;
        struct drm_mm_node *pinned_node;
        uint32_t pinned_mem_type;
        struct list_head pinned_lru;
@@ -415,6 +415,13 @@ struct drm_mem_type_manager {
        void *io_addr;
 };
 
+struct drm_bo_lock {
+       struct drm_user_object base;
+       wait_queue_head_t queue;
+       atomic_t write_lock_pending;
+       atomic_t readers;
+};
+
 #define _DRM_FLAG_MEMTYPE_FIXED     0x00000001 /* Fixed (on-card) PCI memory */
 #define _DRM_FLAG_MEMTYPE_MAPPABLE  0x00000002 /* Memory mappable */
 #define _DRM_FLAG_MEMTYPE_CACHED    0x00000004 /* Cached binding */
@@ -424,8 +431,8 @@ struct drm_mem_type_manager {
 #define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020 /* Select caching */
 
 struct drm_buffer_manager {
-       struct mutex init_mutex;
-       struct mutex evict_mutex;
+        struct drm_bo_lock bm_lock;
+        struct mutex evict_mutex;
        int nice_mode;
        int initialized;
        struct drm_file *last_to_validate;
@@ -471,13 +478,12 @@ extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo,
 extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-
+extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_bo_driver_finish(struct drm_device *dev);
 extern int drm_bo_driver_init(struct drm_device *dev);
 extern int drm_bo_pci_offset(struct drm_device *dev,
@@ -513,6 +519,7 @@ extern int drm_bo_init_mm(struct drm_device * dev, unsigned type,
 extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
                                  uint32_t fence_class, uint64_t flags,
                                  uint64_t mask, uint32_t hint,
+                                 int use_old_fence_class,
                                  struct drm_bo_info_rep * rep,
                                  struct drm_buffer_object **bo_rep);
 extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv,
@@ -609,6 +616,21 @@ extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * m
                               void **virtual);
 extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
                                void *virtual);
+/*
+ * drm_bo_lock.c 
+ * Simple replacement for the hardware lock on buffer manager init and clean.
+ */
+
+
+extern void drm_bo_init_lock(struct drm_bo_lock *lock);
+extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
+extern int drm_bo_read_lock(struct drm_bo_lock *lock);
+extern int drm_bo_write_lock(struct drm_bo_lock *lock, 
+                            struct drm_file *file_priv);
+
+extern int drm_bo_write_unlock(struct drm_bo_lock *lock, 
+                              struct drm_file *file_priv);
+
 #ifdef CONFIG_DEBUG_MUTEXES
 #define DRM_ASSERT_LOCKED(_mutex)                                      \
        BUG_ON(!mutex_is_locked(_mutex) ||                              \
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index db8c0bd..34e8d2b 100644
@@ -72,7 +72,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
        init_timer(&dev->timer);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->ctxlist_mutex);
-       mutex_init(&dev->bm.init_mutex);
        mutex_init(&dev->bm.evict_mutex);
 
        idr_init(&dev->drw_idr);
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index c4e790e..d2554f3 100644
@@ -728,10 +728,17 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
        if (address > vma->vm_end)
                return NOPFN_SIGBUS;
 
-       err = mutex_lock_interruptible(&bo->mutex);
+       dev = bo->dev;
+       err = drm_bo_read_lock(&dev->bm.bm_lock);
        if (err)
                return NOPFN_REFAULT;
 
+       err = mutex_lock_interruptible(&bo->mutex);
+       if (err) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               return NOPFN_REFAULT;
+       }
+
        err = drm_bo_wait(bo, 0, 0, 0);
        if (err) {
                ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
@@ -754,7 +761,6 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                }
        }
 
-       dev = bo->dev;
        err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
                                &bus_size);
 
@@ -792,6 +798,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
        }
 out_unlock:
        mutex_unlock(&bo->mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
        return ret;
 }
 #endif
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index 682a899..01fae64 100644
@@ -124,6 +124,8 @@ uint32_t i915_evict_mask(struct drm_buffer_object *bo)
        }
 }
 
+#if 0 /* See comment below */
+
 static void i915_emit_copy_blit(struct drm_device * dev,
                                uint32_t src_offset,
                                uint32_t dst_offset,
@@ -224,6 +226,16 @@ out_cleanup:
        return ret;
 }
 
+#endif
+
+/*
+ * Disable i915_move_flip for now, since we can't guarantee that the hardware lock
+ * is held here. To re-enable we need to make sure either
+ * a) The X server is using DRM to submit commands to the ring, or
+ * b) DRM can use the HP ring for these blits. This means i915 needs to implement
+ *    a new ring submission mechanism and fence class.
+ */
+
 int i915_move(struct drm_buffer_object * bo,
              int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
 {
@@ -232,10 +244,10 @@ int i915_move(struct drm_buffer_object * bo,
        if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
                return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
-               if (i915_move_flip(bo, evict, no_wait, new_mem))
+               if (0 /*i915_move_flip(bo, evict, no_wait, new_mem)*/)
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else {
-               if (i915_move_blit(bo, evict, no_wait, new_mem))
+               if (0 /*i915_move_blit(bo, evict, no_wait, new_mem)*/)
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }
        return 0;
diff --git a/linux-core/intel_fb.c b/linux-core/intel_fb.c
index c9cb293..564a913 100644
@@ -500,8 +500,9 @@ int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
                                       DRM_BO_FLAG_READ |
                                       DRM_BO_FLAG_WRITE |
                                       DRM_BO_FLAG_MEM_TT |
-                                      DRM_BO_FLAG_MEM_VRAM,
-                                      0, 0, 0,
+                                      DRM_BO_FLAG_MEM_VRAM |
+                                      DRM_BO_FLAG_NO_EVICT,
+                                      DRM_BO_HINT_DONT_FENCE, 0, 0,
                                       &fbo);
        if (ret || !fbo) {
                printk(KERN_ERR "failed to allocate framebuffer\n");
@@ -510,14 +511,6 @@ int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
                return -EINVAL;
        }
 
-       ret = drm_bo_set_pin(dev, fbo, 1);
-       if (ret) {
-               printk(KERN_ERR "failed to pin framebuffer, aborting\n");
-               drm_framebuffer_destroy(fb);
-               framebuffer_release(info);
-               return -EINVAL;
-       }
-
        fb->offset = fbo->offset;
        fb->bo = fbo;
        printk("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
index 7e162a8..9222b08 100644 (file)
@@ -133,6 +133,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv,
                      dev_priv->agp_initialized)) {
                DRM_ERROR
                    ("Attempt to allocate from uninitialized memory manager.\n");
+               mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
 
index d609fdd..5c8acfa 100644 (file)
@@ -675,6 +675,14 @@ struct drm_fence_arg {
  */
 
 /*
+ * Mask: Never evict this buffer, not even with force. This type of buffer is
+ * only available to root and must be manually removed before the buffer
+ * manager is shut down or locked.
+ * Flags: Acknowledge
+ */
+#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)
+
+/*
  * Mask: Require that the buffer is placed in mappable memory when validated.
  *       If not set the buffer may or may not be in mappable memory when validated.
  * Flags: If set, the buffer is in mappable memory.
@@ -716,6 +724,7 @@ struct drm_fence_arg {
  * Flags: Acknowledge.
  */
 #define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
+#define DRM_BO_FLAG_TILE           (1ULL << 15)
 
 /*
  * Memory type flags that can be or'ed together in the mask, but only
@@ -748,11 +757,11 @@ struct drm_fence_arg {
 /* Don't place this buffer on the unfenced list.*/
 #define DRM_BO_HINT_DONT_FENCE  0x00000004
 #define DRM_BO_HINT_WAIT_LAZY   0x00000008
-#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010
 
 #define DRM_BO_INIT_MAGIC 0xfe769812
-#define DRM_BO_INIT_MAJOR 0
-#define DRM_BO_INIT_MINOR 1
+#define DRM_BO_INIT_MAJOR 1
+#define DRM_BO_INIT_MINOR 0
+#define DRM_BO_INIT_PATCH 0
 
 
 struct drm_bo_info_req {
@@ -761,6 +770,8 @@ struct drm_bo_info_req {
        unsigned int handle;
        unsigned int hint;
        unsigned int fence_class;
+       unsigned int desired_tile_stride;
+       unsigned int tile_info;
        unsigned int pad64;
 };
 
@@ -772,27 +783,6 @@ struct drm_bo_create_req {
        unsigned int page_alignment;
 };
 
-struct drm_bo_op_req {
-       enum {
-               drm_bo_validate,
-               drm_bo_fence,
-               drm_bo_ref_fence,
-       } op;
-       unsigned int arg_handle;
-       struct drm_bo_info_req bo_req;
-};
-
-struct drm_bo_set_pin_req {
-       /** Buffer object ID */
-       unsigned int handle;
-       /**
-        * - 0: Unpin the given buffer object.
-        * - 1: Pin the given buffer object, requiring that its offset and
-        * memory area stay constant until unpin.  The intended use is for
-        * scanout buffers.
-        */
-       unsigned int pin;
-};
 
 /*
  * Reply flags
@@ -849,6 +839,17 @@ struct drm_bo_map_wait_idle_arg {
        } d;
 };
 
+struct drm_bo_op_req {
+       enum {
+               drm_bo_validate,
+               drm_bo_fence,
+               drm_bo_ref_fence,
+       } op;
+       unsigned int arg_handle;
+       struct drm_bo_info_req bo_req;
+};
+
+
 struct drm_bo_op_arg {
        uint64_t next;
        union {
@@ -859,12 +860,6 @@ struct drm_bo_op_arg {
        unsigned int pad64;
 };
 
-struct drm_bo_set_pin_arg {
-       union {
-               struct drm_bo_set_pin_req req;
-               struct drm_bo_info_rep rep;
-       } d;
-};
 
 #define DRM_BO_MEM_LOCAL 0
 #define DRM_BO_MEM_TT 1
@@ -877,8 +872,18 @@ struct drm_bo_set_pin_arg {
 
 #define DRM_BO_MEM_TYPES 8 /* For now. */
 
+#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
+#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
+
+struct drm_bo_version_arg {
+       uint32_t major;
+       uint32_t minor;
+       uint32_t patchlevel;
+};
+
 struct drm_mm_type_arg {
        unsigned int mem_type;
+       unsigned int lock_flags;
 };
 
 struct drm_mm_init_arg {
@@ -1069,10 +1074,11 @@ struct drm_mode_mode_cmd {
 #define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
 #define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
 #define DRM_IOCTL_BO_UNREFERENCE        DRM_IOWR(0xd2, struct drm_bo_handle_arg)
-#define DRM_IOCTL_BO_OP                 DRM_IOWR(0xd3, struct drm_bo_op_arg)
+#define DRM_IOCTL_BO_SETSTATUS          DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
 #define DRM_IOCTL_BO_INFO               DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
 #define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_SET_PIN           DRM_IOWR(0xd6, struct drm_bo_set_pin_arg)
+#define DRM_IOCTL_BO_VERSION            DRM_IOR(0xd6, struct drm_bo_version_arg)
+
 
 #define DRM_IOCTL_MODE_GETRESOURCES     DRM_IOWR(0xA0, struct drm_mode_card_res)
 #define DRM_IOCTL_MODE_GETCRTC          DRM_IOWR(0xA1, struct drm_mode_crtc)
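
With DRM_IOCTL_BO_OP and DRM_IOCTL_BO_SET_PIN gone and the interface version bumped to 1.0.0, user space should verify the buffer-object interface version before relying on the new ioctls. The sketch below is illustrative only and not part of this patch; it assumes an already-opened DRM file descriptor, uses only the ioctl and structures defined above, and check_bo_version is a hypothetical helper name (the include path for drm.h depends on the build setup).

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"   /* DRM_IOCTL_BO_VERSION, struct drm_bo_version_arg */

/* Hypothetical helper: query the BO interface version and require an
 * exact major match. One plausible policy is also rejecting kernels
 * that report an older minor than the headers were built against.
 * Retries on EAGAIN, like the other BO ioctl wrappers.
 */
static int check_bo_version(int fd)
{
    struct drm_bo_version_arg arg;
    int ret;

    memset(&arg, 0, sizeof(arg));
    do {
        ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
    } while (ret && errno == EAGAIN);
    if (ret)
        return -errno;

    if (arg.major != DRM_BO_INIT_MAJOR || arg.minor < DRM_BO_INIT_MINOR)
        return -EINVAL;
    return 0;
}
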
index 0be4802..07b3a35 100644 (file)
@@ -180,6 +180,7 @@ static int i915_initialize(struct drm_device * dev,
        }
        DRM_DEBUG("Enabled hardware status page\n");
        dev->dev_private = (void *)dev_priv;
+       mutex_init(&dev_priv->cmdbuf_mutex);
        return 0;
 }
 
@@ -461,7 +462,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
        int i = 0, count, ret;
 
        if (cmd->sz & 0x3) {
-               DRM_ERROR("alignment");
+               DRM_ERROR("alignment\n");
                return -EINVAL;
        }
 
@@ -499,7 +500,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
        RING_LOCALS;
 
        if ((batch->start | batch->used) & 0x7) {
-               DRM_ERROR("alignment");
+               DRM_ERROR("alignment\n");
                return -EINVAL;
        }
 
@@ -831,6 +832,9 @@ int i915_process_relocs(struct drm_file *file_priv,
 
        } while (reloc_offset != reloc_end);
 out:
+       drm_bo_kunmap(&relocatee->kmap);
+       relocatee->data_page = NULL;
+
        drm_bo_kunmap(&reloc_kmap);
 
        mutex_lock(&dev->struct_mutex);
@@ -868,7 +872,6 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
                }
        }
        
-       drm_bo_kunmap(&relocatee.kmap);
        mutex_lock(&dev->struct_mutex);
        drm_bo_usage_deref_locked(&relocatee.buf);
        mutex_unlock(&dev->struct_mutex);
@@ -929,11 +932,19 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
                buf_handle = req->bo_req.handle;
                buf_reloc_handle = arg.reloc_handle;
 
+               if (buf_reloc_handle) {
+                       ret = i915_exec_reloc(file_priv, buf_handle, buf_reloc_handle, buffers, buf_count);
+                       if (ret)
+                               goto out_err;
+                       DRM_MEMORYBARRIER();
+               }
+
                rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
                                                 req->bo_req.fence_class,
                                                 req->bo_req.flags,
                                                 req->bo_req.mask,
                                                 req->bo_req.hint,
+                                                0,
                                                 &rep.bo_info,
                                                 &buffers[buf_count]);
 
@@ -953,11 +964,6 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
                data = next;
                buf_count++;
 
-               if (buf_reloc_handle) {
-                       ret = i915_exec_reloc(file_priv, buf_handle, buf_reloc_handle, buffers, buf_count);
-                       if (ret)
-                               goto out_err;
-               }
        } while (next != 0);
        *num_buffers = buf_count;
        return 0;
@@ -989,8 +995,6 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
        }
 
 
-       LOCK_TEST_WITH_RETURN(dev, file_priv);
-
        if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
                                                        batch->num_cliprects *
                                                        sizeof(struct drm_clip_rect)))
@@ -999,11 +1003,30 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
        if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
                return -EINVAL;
 
+
+       ret = drm_bo_read_lock(&dev->bm.bm_lock);
+       if (ret) 
+               return ret;
+
+       /*
+        * The cmdbuf_mutex makes sure the validate-submit-fence
+        * sequence is atomic with respect to other execbuffer callers.
+        */
+
+       ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+       if (ret) {
+               drm_bo_read_unlock(&dev->bm.bm_lock);
+               return -EAGAIN;
+       }
+
        num_buffers = exec_buf->num_buffers;
 
        buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER);
-       if (!buffers)
+       if (!buffers) {
+               mutex_unlock(&dev_priv->cmdbuf_mutex);
+               drm_bo_read_unlock(&dev->bm.bm_lock);
                return -ENOMEM;
+       }
 
        /* validate buffer list + fixup relocations */
        ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
@@ -1012,7 +1035,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
                goto out_free;
 
        /* make sure all previous memory operations have passed */
-       asm volatile("mfence":::"memory");
+       DRM_MEMORYBARRIER();
 
        /* submit buffer */
        batch->start = buffers[num_buffers-1]->offset;
@@ -1051,6 +1074,8 @@ out_err0:
 out_free:
        drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER);
 
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+       drm_bo_read_unlock(&dev->bm.bm_lock);
        return ret;
 }
 #endif
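
The execbuffer changes above establish a fixed lock order: the buffer-manager read lock (dev->bm.bm_lock) is taken before the per-device cmdbuf_mutex, and every exit path releases them in the reverse order. The sketch below condenses that discipline with the validate/submit/fence work elided; it is illustrative only, not part of the patch, and the function name is hypothetical.

/* Condensed sketch of the locking discipline used by the execbuffer path:
 * bm_lock (read) first, cmdbuf_mutex second, released in reverse order.
 */
static int i915_execbuffer_locking_sketch(struct drm_device *dev,
                                          struct drm_i915_private *dev_priv)
{
        int ret;

        ret = drm_bo_read_lock(&dev->bm.bm_lock);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
        if (ret) {
                drm_bo_read_unlock(&dev->bm.bm_lock);
                return -EAGAIN;
        }

        /*
         * ... validate the buffer list, fix up relocations, emit the
         * batch, and fence the buffers while both locks are held ...
         */
        ret = 0;

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        drm_bo_read_unlock(&dev->bm.bm_lock);
        return ret;
}
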
index 12ac8b6..be3212d 100644 (file)
@@ -143,6 +143,7 @@ struct drm_i915_private {
 #ifdef I915_HAVE_BUFFER
        void *agp_iomap;
        unsigned int max_validate_buffers;
+       struct mutex cmdbuf_mutex;
 #endif
 
        DRM_SPINTYPE swaps_lock;
index 8e419b8..3b43c72 100644 (file)
@@ -188,16 +188,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
                                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                                       DRM_BO_FLAG_MEM_VRAM |
+                                      DRM_BO_FLAG_NO_EVICT |
                                       DRM_BO_HINT_DONT_FENCE, 0, 0x1, 0,
                                       &dev_priv->ring_buffer);
        if (ret < 0) {
-               DRM_ERROR("Unable to allocate ring buffer\n");
-               return -EINVAL;
-       }
-
-       ret = drm_bo_set_pin(dev, dev_priv->ring_buffer, 1);
-       if (ret < 0) {
-               DRM_ERROR("Unable to pin ring buffer\n");
+               DRM_ERROR("Unable to allocate or pin ring buffer\n");
                return -EINVAL;
        }