Remove the clean_unfenced function.
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 61db102..89c014e 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -1,8 +1,8 @@
 /**************************************************************************
- * 
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
  * All Rights Reserved.
- * 
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
- * 
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- * 
- * 
  **************************************************************************/
 /*
  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  */
 #include "drmP.h"
 
 /*
- * Buffer object locking policy:
- * Lock dev->struct_mutex;
- * Increase usage
- * Unlock dev->struct_mutex;
- * Lock buffer->mutex;
- * Do whatever you want;
- * Unlock buffer->mutex;
- * Decrease usage. Call destruction if zero.
+ * Locking may look a bit complicated but isn't really:
+ *
+ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
+ * when there is a chance that it can be zero before or after the operation.
+ *
+ * dev->struct_mutex also protects all lists and list heads, as well as
+ * hash tables and hash heads.
  *
- * User object visibility ups usage just once, since it has its own 
- * refcounting.
+ * bo->mutex protects the buffer object itself, excluding the usage field.
+ * bo->mutex also protects the buffer list heads, so to manipulate those we
+ * need both bo->mutex and dev->struct_mutex.
  *
- * Destruction:
- * lock dev->struct_mutex;
- * Verify that usage is zero. Otherwise unlock and continue.
- * Destroy object.
- * unlock dev->struct_mutex;
+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
+ * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
+ * traversal will, in general, need to be restarted.
  *
- * Mutex and spinlock locking orders:
- * 1.) Buffer mutex
- * 2.) Refer to ttm locking orders.
  */
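
A minimal sketch (not part of this patch) of the traversal pattern the
comment above describes, assuming only drm_bo_usage_deref_unlocked() from
this file. The usage count pins the object across the unlock, and the walk
restarts from the list head after every release of dev->struct_mutex, since
the list may have changed in the meantime:

static void example_drain_lru(struct drm_device *dev, struct list_head *lru)
{
	struct drm_buffer_object *entry;

	mutex_lock(&dev->struct_mutex);
	while (!list_empty(lru)) {
		entry = list_entry(lru->next, struct drm_buffer_object, lru);

		/* A nonzero usage count keeps the object alive. */
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);

		/* bo->mutex is the outer lock; take it only after
		 * dropping dev->struct_mutex. */
		mutex_lock(&entry->mutex);
		/* ... evict or otherwise unlink entry from this lru ... */
		mutex_unlock(&entry->mutex);

		drm_bo_usage_deref_unlocked(&entry);
		mutex_lock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->struct_mutex);
}
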
 
-#define DRM_FLAG_MASKED(_old, _new, _mask) {\
-(_old) ^= (((_old) ^ (_new)) & (_mask)); \
-}
+static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
+static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
+static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
 
 static inline uint32_t drm_bo_type_flags(unsigned type)
 {
@@ -68,140 +63,198 @@ static inline uint32_t drm_bo_type_flags(unsigned type)
  * bo locked. dev->struct_mutex locked.
  */
 
-static void drm_bo_add_to_lru(drm_buffer_object_t * buf,
-                             drm_buffer_manager_t * bm)
+void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
 {
-       struct list_head *list;
-       buf->mem_type = 0;
+       struct drm_mem_type_manager *man;
 
-       switch(buf->flags & DRM_BO_MASK_MEM) {
-       case DRM_BO_FLAG_MEM_TT:
-               buf->mem_type = DRM_BO_MEM_TT;
-               break;
-       case DRM_BO_FLAG_MEM_VRAM:
-               buf->mem_type = DRM_BO_MEM_VRAM;
-               break;
-       case DRM_BO_FLAG_MEM_LOCAL:
-               buf->mem_type = DRM_BO_MEM_LOCAL;
-               break;
-       default:
-               BUG_ON(1);              
-       }
-       list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
-               &bm->pinned[buf->mem_type] : &bm->lru[buf->mem_type];
-       list_add_tail(&buf->lru, list);
-       return;
+       DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+       DRM_ASSERT_LOCKED(&bo->mutex);
+
+       man = &bo->dev->bm.man[bo->pinned_mem_type];
+       list_add_tail(&bo->pinned_lru, &man->pinned);
 }
 
-/*
- * bo locked.
- */
+void drm_bo_add_to_lru(struct drm_buffer_object * bo)
+{
+       struct drm_mem_type_manager *man;
 
-static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict,
-                               int force_no_move)
+       DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+
+       if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
+           || bo->mem.mem_type != bo->pinned_mem_type) {
+               man = &bo->dev->bm.man[bo->mem.mem_type];
+               list_add_tail(&bo->lru, &man->lru);
+       } else {
+               INIT_LIST_HEAD(&bo->lru);
+       }
+}
+
+static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
 {
-       drm_device_t *dev = buf->dev;
+#ifdef DRM_ODD_MM_COMPAT
        int ret;
 
-       if (buf->mm_node) {
-               mutex_lock(&dev->struct_mutex);
-               if (evict)
-                       ret = drm_evict_ttm(buf->ttm);
-               else
-                       ret = drm_unbind_ttm(buf->ttm);
+       if (!bo->map_list.map)
+               return 0;
 
-               if (ret) {
-                       mutex_unlock(&dev->struct_mutex);
-                       if (ret == -EAGAIN)
-                               schedule();
-                       return ret;
-               }
+       ret = drm_bo_lock_kmm(bo);
+       if (ret)
+               return ret;
+       drm_bo_unmap_virtual(bo);
+       if (old_is_pci)
+               drm_bo_finish_unmap(bo);
+#else
+       if (!bo->map_list.map)
+               return 0;
 
-               if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
-                       drm_mm_put_block(buf->mm_node);
-                       buf->mm_node = NULL;
-               }
-               mutex_unlock(&dev->struct_mutex);
-       }
+       drm_bo_unmap_virtual(bo);
+#endif
+       return 0;
+}
 
-       buf->flags &= ~DRM_BO_FLAG_MEM_TT;
-       buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
+{
+#ifdef DRM_ODD_MM_COMPAT
+       int ret;
 
-       return 0;
+       if (!bo->map_list.map)
+               return;
+
+       ret = drm_bo_remap_bound(bo);
+       if (ret) {
+               DRM_ERROR("Failed to remap a bound buffer object.\n"
+                         "\tThis might cause a sigbus later.\n");
+       }
+       drm_bo_unlock_kmm(bo);
+#endif
 }
 
 /*
- * Lock dev->struct_mutex
+ * Call bo->mutex locked.
  */
 
-static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
+static int drm_bo_add_ttm(struct drm_buffer_object * bo)
 {
+       struct drm_device *dev = bo->dev;
+       int ret = 0;
+       bo->ttm = NULL;
 
-       drm_buffer_manager_t *bm = &dev->bm;
+       DRM_ASSERT_LOCKED(&bo->mutex);
 
-       DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+       switch (bo->type) {
+       case drm_bo_type_dc:
+       case drm_bo_type_kernel:
+               bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+               if (!bo->ttm)
+                       ret = -ENOMEM;
+               break;
+       case drm_bo_type_user:
+               break;
+       default:
+               DRM_ERROR("Illegal buffer object type\n");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
+                                 struct drm_bo_mem_reg * mem,
+                                 int evict, int no_wait)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+       int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
+       int new_is_pci = drm_mem_reg_is_pci(dev, mem);
+       struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
+       struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
+       int ret = 0;
+
+       if (old_is_pci || new_is_pci ||
+           ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
+               ret = drm_bo_vm_pre_move(bo, old_is_pci);
+       if (ret)
+               return ret;
 
        /*
-        * Somone might try to access us through the still active BM lists.
+        * Create and bind a ttm if required.
         */
 
-       if (atomic_read(&bo->usage) != 0)
-               return;
-       if (!list_empty(&bo->ddestroy))
-               return;
-
-       if (bo->fence) {
-               if (!drm_fence_object_signaled(bo->fence, bo->fence_type)) {
+       if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
+               ret = drm_bo_add_ttm(bo);
+               if (ret)
+                       goto out_err;
 
-                       drm_fence_object_flush(dev, bo->fence, bo->fence_type);
-                       list_add_tail(&bo->ddestroy, &bm->ddestroy);
-                       schedule_delayed_work(&bm->wq,
-                                             ((DRM_HZ / 100) <
-                                              1) ? 1 : DRM_HZ / 100);
-                       return;
-               } else {
-                       drm_fence_usage_deref_locked(dev, bo->fence);
-                       bo->fence = NULL;
+               if (mem->mem_type != DRM_BO_MEM_LOCAL) {
+                       ret = drm_bind_ttm(bo->ttm, mem);
+                       if (ret)
+                               goto out_err;
                }
        }
-       /*
-        * Take away from lru lists.
-        */
 
-       list_del_init(&bo->lru);
+       if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
 
-       if (bo->ttm) {
-               unsigned long _end = jiffies + DRM_HZ;
-               int ret;
+               struct drm_bo_mem_reg *old_mem = &bo->mem;
+               uint64_t save_flags = old_mem->flags;
+               uint64_t save_mask = old_mem->mask;
 
-               /*
-                * This temporarily unlocks struct_mutex. 
-                */
+               *old_mem = *mem;
+               mem->mm_node = NULL;
+               old_mem->mask = save_mask;
+               DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
 
-               do {
-                       ret = drm_unbind_ttm(bo->ttm);
-                       if (ret == -EAGAIN) {
-                               mutex_unlock(&dev->struct_mutex);
-                               schedule();
-                               mutex_lock(&dev->struct_mutex);
-                       }
-               } while (ret == -EAGAIN && !time_after_eq(jiffies, _end));
+       } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
+                  !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
+
+               ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
+
+       } else if (dev->driver->bo_driver->move) {
+               ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
+
+       } else {
+
+               ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
 
-               if (ret) {
-                       DRM_ERROR("Couldn't unbind buffer. "
-                                 "Bad. Continuing anyway\n");
-               }
        }
 
-       if (bo->mm_node) {
-               drm_mm_put_block(bo->mm_node);
-               bo->mm_node = NULL;
+       if (ret)
+               goto out_err;
+
+       if (old_is_pci || new_is_pci)
+               drm_bo_vm_post_move(bo);
+
+       if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
+               ret =
+                   dev->driver->bo_driver->invalidate_caches(dev,
+                                                             bo->mem.flags);
+               if (ret)
+                       DRM_ERROR("Can not flush read caches\n");
        }
-       if (bo->ttm_object) {
-               drm_ttm_object_deref_locked(dev, bo->ttm_object);
+
+       DRM_FLAG_MASKED(bo->priv_flags,
+                       (evict) ? _DRM_BO_FLAG_EVICTED : 0,
+                       _DRM_BO_FLAG_EVICTED);
+
+       if (bo->mem.mm_node)
+               bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+                       bm->man[bo->mem.mem_type].gpu_offset;
+
+       return 0;
+
+      out_err:
+       if (old_is_pci || new_is_pci)
+               drm_bo_vm_post_move(bo);
+
+       new_man = &bm->man[bo->mem.mem_type];
+       if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
+               drm_ttm_unbind(bo->ttm);
+               drm_destroy_ttm(bo->ttm);
+               bo->ttm = NULL;
        }
-       atomic_dec(&bm->count);
-       drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
+
+       return ret;
 }
 
 /*
@@ -209,104 +262,202 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
  * Wait until the buffer is idle.
  */
 
-static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
-                      int no_wait)
+int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
+               int no_wait)
 {
-
-       drm_fence_object_t *fence = bo->fence;
        int ret;
 
-       if (fence) {
-               drm_device_t *dev = bo->dev;
-               if (drm_fence_object_signaled(fence, bo->fence_type)) {
-                       drm_fence_usage_deref_unlocked(dev, fence);
-                       bo->fence = NULL;
+       DRM_ASSERT_LOCKED(&bo->mutex);
+
+       if (bo->fence) {
+               if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
+                       drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
                if (no_wait) {
                        return -EBUSY;
                }
                ret =
-                   drm_fence_object_wait(dev, fence, lazy, ignore_signals,
+                   drm_fence_object_wait(bo->fence, lazy, ignore_signals,
                                          bo->fence_type);
                if (ret)
                        return ret;
 
-               drm_fence_usage_deref_unlocked(dev, fence);
-               bo->fence = NULL;
+               drm_fence_usage_deref_unlocked(&bo->fence);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_bo_wait);
+
+static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
 
+       if (bo->fence) {
+               if (bm->nice_mode) {
+                       unsigned long _end = jiffies + 3 * DRM_HZ;
+                       int ret;
+                       do {
+                               ret = drm_bo_wait(bo, 0, 1, 0);
+                               if (ret && allow_errors)
+                                       return ret;
+
+                       } while (ret && !time_after_eq(jiffies, _end));
+
+                       if (bo->fence) {
+                               bm->nice_mode = 0;
+                               DRM_ERROR("Detected GPU lockup or "
+                                         "fence driver was taken down. "
+                                         "Evicting buffer.\n");
+                       }
+               }
+               if (bo->fence)
+                       drm_fence_usage_deref_unlocked(&bo->fence);
        }
        return 0;
 }
 
 /*
  * Call dev->struct_mutex locked.
+ * Attempts to remove all private references to a buffer by expiring its
+ * fence object and removing from lru lists and memory managers.
+ */
+
+static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
+       atomic_inc(&bo->usage);
+       mutex_unlock(&dev->struct_mutex);
+       mutex_lock(&bo->mutex);
+
+       DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+
+       if (bo->fence && drm_fence_object_signaled(bo->fence,
+                                                  bo->fence_type, 0))
+               drm_fence_usage_deref_unlocked(&bo->fence);
+
+       if (bo->fence && remove_all)
+               (void)drm_bo_expire_fence(bo, 0);
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (!atomic_dec_and_test(&bo->usage)) {
+               goto out;
+       }
+
+       if (!bo->fence) {
+               list_del_init(&bo->lru);
+               if (bo->mem.mm_node) {
+                       drm_mm_put_block(bo->mem.mm_node);
+                       if (bo->pinned_node == bo->mem.mm_node)
+                               bo->pinned_node = NULL;
+                       bo->mem.mm_node = NULL;
+               }
+               list_del_init(&bo->pinned_lru);
+               if (bo->pinned_node) {
+                       drm_mm_put_block(bo->pinned_node);
+                       bo->pinned_node = NULL;
+               }
+               list_del_init(&bo->ddestroy);
+               mutex_unlock(&bo->mutex);
+               drm_bo_destroy_locked(bo);
+               return;
+       }
+
+       if (list_empty(&bo->ddestroy)) {
+               drm_fence_object_flush(bo->fence, bo->fence_type);
+               list_add_tail(&bo->ddestroy, &bm->ddestroy);
+               schedule_delayed_work(&bm->wq,
+                                     ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
+       }
+
+      out:
+       mutex_unlock(&bo->mutex);
+       return;
+}
+
+/*
+ * Verify that refcount is 0 and that there are no internal references
+ * to the buffer object. Then destroy it.
+ */
+
+static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
+       if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
+           list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
+           list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
+               if (bo->fence != NULL) {
+                       DRM_ERROR("Fence was non-zero.\n");
+                       drm_bo_cleanup_refs(bo, 0);
+                       return;
+               }
+
+#ifdef DRM_ODD_MM_COMPAT
+               BUG_ON(!list_empty(&bo->vma_list));
+               BUG_ON(!list_empty(&bo->p_mm_list));
+#endif
+
+               if (bo->ttm) {
+                       drm_ttm_unbind(bo->ttm);
+                       drm_destroy_ttm(bo->ttm);
+                       bo->ttm = NULL;
+               }
+
+               atomic_dec(&bm->count);
+
+               //              BUG_ON(!list_empty(&bo->base.list));
+               drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
+
+               return;
+       }
+
+       /*
+        * Some stuff is still trying to reference the buffer object.
+        * Get rid of those references.
+        */
+
+       drm_bo_cleanup_refs(bo, 0);
+
+       return;
+}
+
+/*
+ * Call dev->struct_mutex locked.
  */
 
-static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
+static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
 {
-       drm_buffer_manager_t *bm = &dev->bm;
+       struct drm_buffer_manager *bm = &dev->bm;
 
-       drm_buffer_object_t *entry, *nentry;
+       struct drm_buffer_object *entry, *nentry;
        struct list_head *list, *next;
-       drm_fence_object_t *fence;
 
        list_for_each_safe(list, next, &bm->ddestroy) {
-               entry = list_entry(list, drm_buffer_object_t, ddestroy);
-               atomic_inc(&entry->usage);
-               if (atomic_read(&entry->usage) != 1) {
-                       atomic_dec(&entry->usage);
-                       continue;
-               }
+               entry = list_entry(list, struct drm_buffer_object, ddestroy);
 
                nentry = NULL;
                if (next != &bm->ddestroy) {
-                       nentry = list_entry(next, drm_buffer_object_t,
+                       nentry = list_entry(next, struct drm_buffer_object,
                                            ddestroy);
                        atomic_inc(&nentry->usage);
                }
 
-               mutex_unlock(&dev->struct_mutex);
-               mutex_lock(&entry->mutex);
-               fence = entry->fence;
-               if (fence && drm_fence_object_signaled(fence,
-                                                      entry->fence_type)) {
-                       drm_fence_usage_deref_locked(dev, fence);
-                       entry->fence = NULL;
-               }
+               drm_bo_cleanup_refs(entry, remove_all);
 
-               if (entry->fence && remove_all) {
-                       if (bm->nice_mode) {
-                               unsigned long _end = jiffies + 3 * DRM_HZ;
-                               int ret;
-                               do {
-                                       ret = drm_bo_wait(entry, 0, 1, 0);
-                               } while (ret && !time_after_eq(jiffies, _end));
-
-                               if (entry->fence) {
-                                       bm->nice_mode = 0;
-                                       DRM_ERROR("Detected GPU lockup or "
-                                                 "fence driver was taken down. "
-                                                 "Evicting waiting buffers.\n");
-                               }
-                       }
-                       if (entry->fence) {
-                               drm_fence_usage_deref_unlocked(dev,
-                                                              entry->fence);
-                               entry->fence = NULL;
-                       }
-               }
-               mutex_lock(&dev->struct_mutex);
-               mutex_unlock(&entry->mutex);
-               if (atomic_dec_and_test(&entry->usage) && (!entry->fence)) {
-                       list_del_init(&entry->ddestroy);
-                       drm_bo_destroy_locked(dev, entry);
-               }
                if (nentry) {
                        atomic_dec(&nentry->usage);
                }
        }
-
 }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
@@ -316,14 +467,14 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
 #endif
 {
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-       drm_device_t *dev = (drm_device_t *) data;
-       drm_buffer_manager_t *bm = &dev->bm;
+       struct drm_device *dev = (struct drm_device *) data;
+       struct drm_buffer_manager *bm = &dev->bm;
 #else
-       drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
-       drm_device_t *dev = container_of(bm, drm_device_t, bm);
+       struct drm_buffer_manager *bm =
+           container_of(work, struct drm_buffer_manager, wq.work);
+       struct drm_device *dev = container_of(bm, struct drm_device, bm);
 #endif
 
-
        DRM_DEBUG("Delayed delete Worker\n");
 
        mutex_lock(&dev->struct_mutex);
@@ -339,61 +490,116 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
        mutex_unlock(&dev->struct_mutex);
 }
 
-void drm_bo_usage_deref_locked(drm_device_t * dev, drm_buffer_object_t * bo)
+void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
 {
-       if (atomic_dec_and_test(&bo->usage)) {
-               drm_bo_destroy_locked(dev, bo);
+       struct drm_buffer_object *tmp_bo = *bo;
+       *bo = NULL;
+
+       DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
+
+       if (atomic_dec_and_test(&tmp_bo->usage)) {
+               drm_bo_destroy_locked(tmp_bo);
        }
 }
+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
 
-static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
+static void drm_bo_base_deref_locked(struct drm_file * file_priv,
+                                    struct drm_user_object * uo)
 {
-       drm_bo_usage_deref_locked(priv->head->dev,
-                                 drm_user_object_entry(uo, drm_buffer_object_t,
-                                                       base));
+       struct drm_buffer_object *bo =
+           drm_user_object_entry(uo, struct drm_buffer_object, base);
+
+       DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+
+       drm_bo_takedown_vm_locked(bo);
+       drm_bo_usage_deref_locked(&bo);
 }
 
-void drm_bo_usage_deref_unlocked(drm_device_t * dev, drm_buffer_object_t * bo)
+void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
 {
-       if (atomic_dec_and_test(&bo->usage)) {
+       struct drm_buffer_object *tmp_bo = *bo;
+       struct drm_device *dev = tmp_bo->dev;
+
+       *bo = NULL;
+       if (atomic_dec_and_test(&tmp_bo->usage)) {
                mutex_lock(&dev->struct_mutex);
-               if (atomic_read(&bo->usage) == 0)
-                       drm_bo_destroy_locked(dev, bo);
+               if (atomic_read(&tmp_bo->usage) == 0)
+                       drm_bo_destroy_locked(tmp_bo);
+               mutex_unlock(&dev->struct_mutex);
+       }
+}
+EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
+
+void drm_putback_buffer_objects(struct drm_device *dev)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct list_head *list = &bm->unfenced;
+       struct drm_buffer_object *entry, *next;
+
+       mutex_lock(&dev->struct_mutex);
+       list_for_each_entry_safe(entry, next, list, lru) {
+               atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
+
+               mutex_lock(&entry->mutex);
+               BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+               mutex_lock(&dev->struct_mutex);
+
+               list_del_init(&entry->lru);
+               DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+               DRM_WAKEUP(&entry->event_queue);
+
+               /*
+                * FIXME: Might want to put back on head of list
+                * instead of tail here.
+                */
+
+               drm_bo_add_to_lru(entry);
+               mutex_unlock(&entry->mutex);
+               drm_bo_usage_deref_locked(&entry);
        }
+       mutex_unlock(&dev->struct_mutex);
 }
+EXPORT_SYMBOL(drm_putback_buffer_objects);
+
 
 /*
- * Note. The caller has to register (if applicable) 
+ * Note. The caller has to register (if applicable)
  * and deregister fence object usage.
  */
 
-int drm_fence_buffer_objects(drm_file_t * priv,
+int drm_fence_buffer_objects(struct drm_device *dev,
                             struct list_head *list,
                             uint32_t fence_flags,
-                            drm_fence_object_t * fence,
-                            drm_fence_object_t ** used_fence)
+                            struct drm_fence_object * fence,
+                            struct drm_fence_object ** used_fence)
 {
-       drm_device_t *dev = priv->head->dev;
-       drm_buffer_manager_t *bm = &dev->bm;
-
-       drm_buffer_object_t *entry;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_buffer_object *entry;
        uint32_t fence_type = 0;
+       uint32_t fence_class = ~0;
        int count = 0;
        int ret = 0;
-       struct list_head f_list, *l;
+       struct list_head *l;
 
        mutex_lock(&dev->struct_mutex);
 
        if (!list)
                list = &bm->unfenced;
 
+       if (fence)
+               fence_class = fence->fence_class;
+
        list_for_each_entry(entry, list, lru) {
                BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
-               fence_type |= entry->fence_type;
-               if (entry->fence_class != 0) {
-                       DRM_ERROR("Fence class %d is not implemented yet.\n",
-                                 entry->fence_class);
+               fence_type |= entry->new_fence_type;
+               if (fence_class == ~0)
+                       fence_class = entry->new_fence_class;
+               else if (entry->new_fence_class != fence_class) {
+                       DRM_ERROR("Unmatching fence classes on unfenced list: "
+                                 "%d and %d.\n",
+                                 fence_class,
+                                 entry->new_fence_class);
                        ret = -EINVAL;
                        goto out;
                }
@@ -405,17 +611,9 @@ int drm_fence_buffer_objects(drm_file_t * priv,
                goto out;
        }
 
-       /*
-        * Transfer to a local list before we release the dev->struct_mutex;
-        * This is so we don't get any new unfenced objects while fencing 
-        * the ones we already have..
-        */
-
-       list_add_tail(&f_list, list);
-       list_del_init(list);
-
        if (fence) {
-               if ((fence_type & fence->type) != fence_type) {
+               if ((fence_type & fence->type) != fence_type ||
+                   (fence->fence_class != fence_class)) {
                        DRM_ERROR("Given fence doesn't match buffers "
                                  "on unfenced list.\n");
                        ret = -EINVAL;
@@ -423,7 +621,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
                }
        } else {
                mutex_unlock(&dev->struct_mutex);
-               ret = drm_fence_object_create(dev, fence_type,
+               ret = drm_fence_object_create(dev, fence_class, fence_type,
                                              fence_flags | DRM_FENCE_FLAG_EMIT,
                                              &fence);
                mutex_lock(&dev->struct_mutex);
@@ -432,9 +630,10 @@ int drm_fence_buffer_objects(drm_file_t * priv,
        }
 
        count = 0;
-       l = f_list.next;
-       while (l != &f_list) {
-               entry = list_entry(l, drm_buffer_object_t, lru);
+       l = list->next;
+       while (l != list) {
+               prefetch(l->next);
+               entry = list_entry(l, struct drm_buffer_object, lru);
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
@@ -443,37 +642,37 @@ int drm_fence_buffer_objects(drm_file_t * priv,
                if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
                        count++;
                        if (entry->fence)
-                               drm_fence_usage_deref_locked(dev, entry->fence);
-                       entry->fence = fence;
+                               drm_fence_usage_deref_locked(&entry->fence);
+                       entry->fence = drm_fence_reference_locked(fence);
+                       entry->fence_class = entry->new_fence_class;
+                       entry->fence_type = entry->new_fence_type;
                        DRM_FLAG_MASKED(entry->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                        DRM_WAKEUP(&entry->event_queue);
-                       drm_bo_add_to_lru(entry, bm);
+                       drm_bo_add_to_lru(entry);
                }
                mutex_unlock(&entry->mutex);
-               drm_bo_usage_deref_locked(dev, entry);
-               l = f_list.next;
+               drm_bo_usage_deref_locked(&entry);
+               l = list->next;
        }
-       atomic_add(count, &fence->usage);
        DRM_DEBUG("Fenced %d buffers\n", count);
       out:
        mutex_unlock(&dev->struct_mutex);
        *used_fence = fence;
        return ret;
 }
-
 EXPORT_SYMBOL(drm_fence_buffer_objects);
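
A hedged sketch of the caller contract in the note above: when called with a
NULL fence, drm_fence_buffer_objects() creates and emits one, and hands it
back through used_fence; the caller is then responsible for deregistering
that fence usage. The error handling here is illustrative only:

static int example_fence_all_unfenced(struct drm_device *dev)
{
	struct drm_fence_object *used_fence = NULL;
	int ret;

	/* NULL list selects bm->unfenced; NULL fence requests a new one. */
	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &used_fence);
	if (ret)
		return ret;

	/* Each buffer took its own reference; drop the one we were given. */
	drm_fence_usage_deref_unlocked(&used_fence);
	return 0;
}
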
 
 /*
- * bo->mutex locked 
+ * bo->mutex locked
  */
 
-static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
-                       int no_wait, int force_no_move)
+static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
+                       int no_wait)
 {
        int ret = 0;
-       drm_device_t *dev = bo->dev;
-       drm_buffer_manager_t *bm = &dev->bm;
+       struct drm_device *dev = bo->dev;
+       struct drm_bo_mem_reg evict_mem;
 
        /*
         * Someone might have modified the buffer before we took the buffer mutex.
@@ -481,154 +680,254 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
 
        if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
                goto out;
-       if (!(bo->flags & drm_bo_type_flags(mem_type)))
+       if (bo->mem.mem_type != mem_type)
                goto out;
 
        ret = drm_bo_wait(bo, 0, 0, no_wait);
 
+       if (ret && ret != -EAGAIN) {
+               DRM_ERROR("Failed to expire fence before "
+                         "buffer eviction.\n");
+               goto out;
+       }
+
+       evict_mem = bo->mem;
+       evict_mem.mm_node = NULL;
+       evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
+       ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
+
        if (ret) {
                if (ret != -EAGAIN)
-                       DRM_ERROR("Failed to expire fence before "
-                                 "buffer eviction.\n");
+                       DRM_ERROR("Failed to find memory space for "
+                                 "buffer 0x%p eviction.\n", bo);
                goto out;
        }
 
-       if (mem_type == DRM_BO_MEM_TT) {
-               ret = drm_move_tt_to_local(bo, 1, force_no_move);
-               if (ret)
-                       goto out;
-               mutex_lock(&dev->struct_mutex);
-               list_del_init(&bo->lru);
-               drm_bo_add_to_lru(bo, bm);
-               mutex_unlock(&dev->struct_mutex);
-       }
+       ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
 
-       if (ret)
+       if (ret) {
+               if (ret != -EAGAIN)
+                       DRM_ERROR("Buffer eviction failed\n");
                goto out;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       if (evict_mem.mm_node) {
+               if (evict_mem.mm_node != bo->pinned_node)
+                       drm_mm_put_block(evict_mem.mm_node);
+               evict_mem.mm_node = NULL;
+       }
+       list_del(&bo->lru);
+       drm_bo_add_to_lru(bo);
+       mutex_unlock(&dev->struct_mutex);
 
        DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
                        _DRM_BO_FLAG_EVICTED);
+
       out:
        return ret;
 }
 
-/*
- * buf->mutex locked.
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
  */
-
-int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type,
-                      int no_wait)
+static int drm_bo_mem_force_space(struct drm_device * dev,
+                                 struct drm_bo_mem_reg * mem,
+                                 uint32_t mem_type, int no_wait)
 {
-       drm_device_t *dev = buf->dev;
-       drm_mm_node_t *node;
-       drm_buffer_manager_t *bm = &dev->bm;
-       drm_buffer_object_t *bo;
-       drm_mm_t *mm = &bm->manager[mem_type];
+       struct drm_mm_node *node;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_buffer_object *entry;
+       struct drm_mem_type_manager *man = &bm->man[mem_type];
        struct list_head *lru;
-       unsigned long size = buf->num_pages;
+       unsigned long num_pages = mem->num_pages;
        int ret;
 
        mutex_lock(&dev->struct_mutex);
        do {
-               node = drm_mm_search_free(mm, size, buf->page_alignment, 1);
+               node = drm_mm_search_free(&man->manager, num_pages,
+                                         mem->page_alignment, 1);
                if (node)
                        break;
 
-               lru = &bm->lru[mem_type];
+               lru = &man->lru;
                if (lru->next == lru)
                        break;
 
-               bo = list_entry(lru->next, drm_buffer_object_t, lru);
-
-               atomic_inc(&bo->usage);
+               entry = list_entry(lru->next, struct drm_buffer_object, lru);
+               atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
-               mutex_lock(&bo->mutex);
-               BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
-               ret = drm_bo_evict(bo, mem_type, no_wait, 0);
-               mutex_unlock(&bo->mutex);
-               drm_bo_usage_deref_unlocked(dev, bo);
+               mutex_lock(&entry->mutex);
+               BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
+
+               ret = drm_bo_evict(entry, mem_type, no_wait);
+               mutex_unlock(&entry->mutex);
+               drm_bo_usage_deref_unlocked(&entry);
                if (ret)
                        return ret;
                mutex_lock(&dev->struct_mutex);
        } while (1);
 
        if (!node) {
-               DRM_ERROR("Out of videoram / aperture space\n");
                mutex_unlock(&dev->struct_mutex);
                return -ENOMEM;
        }
 
-       node = drm_mm_get_block(node, size, buf->page_alignment);
+       node = drm_mm_get_block(node, num_pages, mem->page_alignment);
        mutex_unlock(&dev->struct_mutex);
-       BUG_ON(!node);
-       node->private = (void *)buf;
-
-       buf->mm_node = node;
-       buf->offset = node->start * PAGE_SIZE;
+       mem->mm_node = node;
+       mem->mem_type = mem_type;
        return 0;
 }
 
-static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
+static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
+                               uint32_t mem_type,
+                               uint32_t mask, uint32_t * res_mask)
 {
-       drm_device_t *dev = bo->dev;
-       drm_ttm_backend_t *be;
-       int ret;
+       uint32_t cur_flags = drm_bo_type_flags(mem_type);
+       uint32_t flag_diff;
 
-       if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
-               BUG_ON(bo->mm_node);
-               ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
-               if (ret)
-                       return ret;
+       if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
+               cur_flags |= DRM_BO_FLAG_CACHED;
+       if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
+               cur_flags |= DRM_BO_FLAG_MAPPABLE;
+       if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
+               DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
+
+       if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
+               return 0;
+
+       if (mem_type == DRM_BO_MEM_LOCAL) {
+               *res_mask = cur_flags;
+               return 1;
        }
 
-       DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start);
+       flag_diff = (mask ^ cur_flags);
+       if ((flag_diff & DRM_BO_FLAG_CACHED) &&
+           (!(mask & DRM_BO_FLAG_CACHED) ||
+            (mask & DRM_BO_FLAG_FORCE_CACHING)))
+               return 0;
 
-       mutex_lock(&dev->struct_mutex);
-       ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
-                          bo->mm_node->start);
-       if (ret) {
-               drm_mm_put_block(bo->mm_node);
-               bo->mm_node = NULL;
+       if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
+           ((mask & DRM_BO_FLAG_MAPPABLE) ||
+            (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
+               return 0;
+
+       *res_mask = cur_flags;
+       return 1;
+}
+
+/**
+ * Creates space for memory region @mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver.  If free space isn't found, then
+ * drm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int drm_bo_mem_space(struct drm_buffer_object * bo,
+                    struct drm_bo_mem_reg * mem, int no_wait)
+{
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man;
+
+       uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
+       const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
+       uint32_t i;
+       uint32_t mem_type = DRM_BO_MEM_LOCAL;
+       uint32_t cur_flags;
+       int type_found = 0;
+       int type_ok = 0;
+       int has_eagain = 0;
+       struct drm_mm_node *node = NULL;
+       int ret;
+
+       mem->mm_node = NULL;
+       for (i = 0; i < num_prios; ++i) {
+               mem_type = prios[i];
+               man = &bm->man[mem_type];
+
+               type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
+                                              &cur_flags);
+
+               if (!type_ok)
+                       continue;
+
+               if (mem_type == DRM_BO_MEM_LOCAL)
+                       break;
+
+               if ((mem_type == bo->pinned_mem_type) &&
+                   (bo->pinned_node != NULL)) {
+                       node = bo->pinned_node;
+                       break;
+               }
+
+               mutex_lock(&dev->struct_mutex);
+               if (man->has_type && man->use_type) {
+                       type_found = 1;
+                       node = drm_mm_search_free(&man->manager, mem->num_pages,
+                                                 mem->page_alignment, 1);
+                       if (node)
+                               node = drm_mm_get_block(node, mem->num_pages,
+                                                       mem->page_alignment);
+               }
+               mutex_unlock(&dev->struct_mutex);
+               if (node)
+                       break;
        }
-       mutex_unlock(&dev->struct_mutex);
 
-       if (ret) {
-               return ret;
+       if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
+               mem->mm_node = node;
+               mem->mem_type = mem_type;
+               mem->flags = cur_flags;
+               return 0;
        }
 
-       be = bo->ttm->be;
-       if (be->needs_ub_cache_adjust(be))
-               bo->flags &= ~DRM_BO_FLAG_CACHED;
-       bo->flags &= ~DRM_BO_MASK_MEM;
-       bo->flags |= DRM_BO_FLAG_MEM_TT;
+       if (!type_found)
+               return -EINVAL;
 
-       if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
-               ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
-               if (ret)
-                       DRM_ERROR("Could not flush read caches\n");
+       num_prios = dev->driver->bo_driver->num_mem_busy_prio;
+       prios = dev->driver->bo_driver->mem_busy_prio;
+
+       for (i = 0; i < num_prios; ++i) {
+               mem_type = prios[i];
+               man = &bm->man[mem_type];
+
+               if (!man->has_type)
+                       continue;
+
+               if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
+                       continue;
+
+               ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
+
+               if (ret == 0) {
+                       mem->flags = cur_flags;
+                       return 0;
+               }
+
+               if (ret == -EAGAIN)
+                       has_eagain = 1;
        }
-       DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
 
-       return 0;
+       ret = (has_eagain) ? -EAGAIN : -ENOMEM;
+       return ret;
 }
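
A minimal caller sketch, mirroring drm_bo_evict() above: fill in a
drm_bo_mem_reg with the placement mask you want, let drm_bo_mem_space() run
its two passes (free-space search, then drm_bo_mem_force_space() eviction),
and hand the resulting region to the move machinery. The mask chosen here is
purely illustrative, and the caller is assumed to hold bo->mutex:

static int example_place_in_tt(struct drm_buffer_object *bo, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg mem;
	int ret;

	mem = bo->mem;		/* inherit num_pages and page_alignment */
	mem.mm_node = NULL;
	mem.mask = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;

	ret = drm_bo_mem_space(bo, &mem, no_wait);
	if (ret)		/* -EAGAIN means a retry may succeed */
		return ret;

	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
	if (ret && mem.mm_node) {
		/* The node was never consumed; give it back. */
		mutex_lock(&dev->struct_mutex);
		drm_mm_put_block(mem.mm_node);
		mutex_unlock(&dev->struct_mutex);
	}
	return ret;
}
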
 
-static int drm_bo_new_flags(drm_device_t * dev,
-                           uint32_t flags, uint32_t new_mask, uint32_t hint,
-                           int init, uint32_t * n_flags, uint32_t * n_mask)
+EXPORT_SYMBOL(drm_bo_mem_space);
+
+static int drm_bo_new_mask(struct drm_buffer_object * bo,
+                          uint64_t new_mask, uint32_t hint)
 {
-       uint32_t new_flags = 0;
        uint32_t new_props;
-       drm_bo_driver_t *driver = dev->driver->bo_driver;
-       drm_buffer_manager_t *bm = &dev->bm;
-       unsigned i;
-
-       /*
-        * First adjust the mask to take away nonexistant memory types. 
-        */
 
-       for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
-               if (!bm->use_type[i])
-                       new_mask &= ~drm_bo_type_flags(i);
+       if (bo->type == drm_bo_type_user) {
+               DRM_ERROR("User buffers are not supported yet\n");
+               return -EINVAL;
        }
 
        if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
@@ -637,62 +936,11 @@ static int drm_bo_new_flags(drm_device_t * dev,
                     "processes\n");
                return -EPERM;
        }
-       if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
-               if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
-                    !driver->cached[DRM_BO_MEM_TT]) &&
-                   ((new_mask & DRM_BO_FLAG_MEM_VRAM)
-                    && !driver->cached[DRM_BO_MEM_VRAM])) {
-                       new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
-               } else {
-                       if (!driver->cached[DRM_BO_MEM_TT])
-                               new_flags &= DRM_BO_FLAG_MEM_TT;
-                       if (!driver->cached[DRM_BO_MEM_VRAM])
-                               new_flags &= DRM_BO_FLAG_MEM_VRAM;
-               }
-       }
-
-       if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
-           !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
-               if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
-                   !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
-                       DRM_ERROR
-                           ("Cannot read cached from a pinned VRAM / TT buffer\n");
-                       return -EINVAL;
-               }
-       }
-
-       /*
-        * Determine new memory location:
-        */
-
-       if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {
 
-               new_flags = new_mask & DRM_BO_MASK_MEM;
-
-               if (!new_flags) {
-                       DRM_ERROR("Invalid buffer object memory flags\n");
-                       return -EINVAL;
-               }
-
-               if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
-                       if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
-                           new_flags & (DRM_BO_FLAG_MEM_VRAM |
-                                        DRM_BO_FLAG_MEM_TT)) {
-                               new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
-                       } else {
-                               new_flags = DRM_BO_FLAG_MEM_LOCAL;
-                       }
-               }
-               if (new_flags & DRM_BO_FLAG_MEM_TT) {
-                       if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
-                           new_flags & DRM_BO_FLAG_MEM_VRAM) {
-                               new_flags = DRM_BO_FLAG_MEM_VRAM;
-                       } else {
-                               new_flags = DRM_BO_FLAG_MEM_TT;
-                       }
-               }
-       } else {
-               new_flags = flags & DRM_BO_MASK_MEM;
+       if ((new_mask & DRM_BO_FLAG_NO_MOVE)) {
+               DRM_ERROR
+                       ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
+               return -EPERM;
        }
 
        new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
@@ -703,22 +951,7 @@ static int drm_bo_new_flags(drm_device_t * dev,
                return -EINVAL;
        }
 
-       new_flags |= new_mask & ~DRM_BO_MASK_MEM;
-
-       if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
-           (new_flags & DRM_BO_FLAG_NO_EVICT) &&
-           (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
-               if (!(flags & DRM_BO_FLAG_CACHED)) {
-                       DRM_ERROR
-                           ("Cannot change caching policy of pinned buffer\n");
-                       return -EINVAL;
-               } else {
-                       new_flags &= ~DRM_BO_FLAG_CACHED;
-               }
-       }
-
-       *n_flags = new_flags;
-       *n_mask = new_mask;
+       bo->mem.mask = new_mask;
        return 0;
 }
 
@@ -726,28 +959,29 @@ static int drm_bo_new_flags(drm_device_t * dev,
  * Call dev->struct_mutex locked.
  */
 
-drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
+struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
                                              uint32_t handle, int check_owner)
 {
-       drm_user_object_t *uo;
-       drm_buffer_object_t *bo;
+       struct drm_user_object *uo;
+       struct drm_buffer_object *bo;
 
-       uo = drm_lookup_user_object(priv, handle);
+       uo = drm_lookup_user_object(file_priv, handle);
 
        if (!uo || (uo->type != drm_buffer_type)) {
                DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
                return NULL;
        }
 
-       if (check_owner && priv != uo->owner) {
-               if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
+       if (check_owner && file_priv != uo->owner) {
+               if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
                        return NULL;
        }
 
-       bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
+       bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
        atomic_inc(&bo->usage);
        return bo;
 }
+EXPORT_SYMBOL(drm_lookup_buffer_object);
 
 /*
  * Call bo->mutex locked.
@@ -755,16 +989,14 @@ drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
  * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
  */
 
-static int drm_bo_quick_busy(drm_buffer_object_t * bo)
+static int drm_bo_quick_busy(struct drm_buffer_object * bo)
 {
-       drm_fence_object_t *fence = bo->fence;
+       struct drm_fence_object *fence = bo->fence;
 
        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
-               drm_device_t *dev = bo->dev;
-               if (drm_fence_object_signaled(fence, bo->fence_type)) {
-                       drm_fence_usage_deref_unlocked(dev, fence);
-                       bo->fence = NULL;
+               if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+                       drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
                return 1;
@@ -777,22 +1009,19 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo)
  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
  */
 
-static int drm_bo_busy(drm_buffer_object_t * bo)
+static int drm_bo_busy(struct drm_buffer_object * bo)
 {
-       drm_fence_object_t *fence = bo->fence;
+       struct drm_fence_object *fence = bo->fence;
 
        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
-               drm_device_t *dev = bo->dev;
-               if (drm_fence_object_signaled(fence, bo->fence_type)) {
-                       drm_fence_usage_deref_unlocked(dev, fence);
-                       bo->fence = NULL;
+               if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+                       drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
-               drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
-               if (drm_fence_object_signaled(fence, bo->fence_type)) {
-                       drm_fence_usage_deref_unlocked(dev, fence);
-                       bo->fence = NULL;
+               drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
+               if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+                       drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
                return 1;
@@ -800,13 +1029,13 @@ static int drm_bo_busy(drm_buffer_object_t * bo)
        return 0;
 }
 
-static int drm_bo_read_cached(drm_buffer_object_t * bo)
+static int drm_bo_read_cached(struct drm_buffer_object * bo)
 {
        int ret = 0;
 
        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-       if (bo->mm_node)
-               ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
+       if (bo->mem.mm_node)
+               ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
        return ret;
 }
 
@@ -814,7 +1043,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo)
  * Wait until a buffer is unmapped.
  */
 
-static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
+static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
 {
        int ret = 0;
 
@@ -830,7 +1059,7 @@ static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
        return ret;
 }
 
-static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
+static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
 {
        int ret;
 
@@ -847,39 +1076,32 @@ static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
  * 1) validating
  * 2) submitting commands
  * 3) fencing
- * Should really be an atomic operation. 
+ * Should really be an atomic operation.
  * We now "solve" this problem by keeping
  * the buffer "unfenced" after validating, but before fencing.
  */
 
-static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
+static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
                                int eagain_if_wait)
 {
        int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-       unsigned long _end = jiffies + 3 * DRM_HZ;
 
        if (ret && no_wait)
                return -EBUSY;
        else if (!ret)
                return 0;
 
-       do {
-               mutex_unlock(&bo->mutex);
-               DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
-                           !drm_bo_check_unfenced(bo));
-               mutex_lock(&bo->mutex);
-               if (ret == -EINTR)
-                       return -EAGAIN;
-               if (ret) {
-                       DRM_ERROR
-                           ("Error waiting for buffer to become fenced\n");
-                       return ret;
-               }
-               ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-       } while (ret && !time_after_eq(jiffies, _end));
+       ret = 0;
+       mutex_unlock(&bo->mutex);
+       DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
+                   !drm_bo_check_unfenced(bo));
+       mutex_lock(&bo->mutex);
+       if (ret == -EINTR)
+               return -EAGAIN;
+       ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (ret) {
                DRM_ERROR("Timeout waiting for buffer to become fenced\n");
-               return ret;
+               return -EBUSY;
        }
        if (eagain_if_wait)
                return -EAGAIN;
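
A hedged sketch of the three steps listed in the comment above;
driver_validate_buffers() and driver_submit_commands() are hypothetical
stand-ins for driver-specific code, while drm_fence_buffer_objects() is the
real step 3 that clears _DRM_BO_FLAG_UNFENCED and wakes anyone blocked in
drm_bo_wait_unfenced():

static int example_submit(struct drm_device *dev)
{
	struct drm_fence_object *fence = NULL;
	int ret;

	ret = driver_validate_buffers(dev);	/* 1) buffers become unfenced */
	if (ret)
		return ret;

	ret = driver_submit_commands(dev);	/* 2) commands reference them */
	if (ret)
		return ret;

	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence); /* 3) */
	if (ret)
		return ret;

	/* See the fencing sketch earlier: drop the returned usage. */
	drm_fence_usage_deref_unlocked(&fence);
	return 0;
}
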
@@ -889,28 +1111,25 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
 
 /*
  * Fill in the ioctl reply argument with buffer info.
- * Bo locked. 
+ * Bo locked.
  */
 
-static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
-                               drm_bo_arg_reply_t * rep)
+static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
+                               struct drm_bo_info_rep *rep)
 {
+       if (!rep)
+               return;
+
        rep->handle = bo->base.hash.key;
-       rep->flags = bo->flags;
+       rep->flags = bo->mem.flags;
        rep->size = bo->num_pages * PAGE_SIZE;
        rep->offset = bo->offset;
-
-       if (bo->ttm_object) {
-               rep->arg_handle = bo->ttm_object->map_list.user_token;
-       } else {
-               rep->arg_handle = 0;
-       }
-
-       rep->mask = bo->mask;
+       rep->arg_handle = bo->map_list.user_token;
+       rep->mask = bo->mem.mask;
        rep->buffer_start = bo->buffer_start;
        rep->fence_flags = bo->fence_type;
        rep->rep_flags = 0;
-       rep->page_alignment = bo->page_alignment;
+       rep->page_alignment = bo->mem.page_alignment;
 
        if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
                DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
@@ -920,33 +1139,31 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
 
 /*
  * Wait for buffer idle and register that we've mapped the buffer.
- * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, 
- * so that if the client dies, the mapping is automatically 
+ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
+ * so that if the client dies, the mapping is automatically
  * unregistered.
  */
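
The resulting lifecycle, roughly (a sketch based on the ref-object calls in the functions below):

	/*
	 * drm_buffer_object_map()   -> drm_add_ref_object(.., _DRM_REF_TYPE1)
	 * drm_buffer_object_unmap() -> drm_remove_ref_object() +
	 *                              drm_bo_usage_deref_locked()
	 * client dies               -> generic ref-object teardown invokes
	 *                              drm_buffer_user_object_unmap()
	 */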
 
-static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
+static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
                                 uint32_t map_flags, unsigned hint,
-                                drm_bo_arg_reply_t * rep)
+                                struct drm_bo_info_rep *rep)
 {
-       drm_buffer_object_t *bo;
-       drm_device_t *dev = priv->head->dev;
+       struct drm_buffer_object *bo;
+       struct drm_device *dev = file_priv->head->dev;
        int ret = 0;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
 
        mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(priv, handle, 1);
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);
 
        if (!bo)
                return -EINVAL;
 
        mutex_lock(&bo->mutex);
-       if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
-               ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-               if (ret)
-                       goto out;
-       }
+       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+       if (ret)
+               goto out;
 
        /*
         * If this returns true, we are currently unmapped.
@@ -968,14 +1185,14 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
                        }
 
                        if ((map_flags & DRM_BO_FLAG_READ) &&
-                           (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
-                           (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+                           (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
+                           (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
                                drm_bo_read_cached(bo);
                        }
                        break;
                } else if ((map_flags & DRM_BO_FLAG_READ) &&
-                          (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
-                          (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+                          (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
+                          (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
 
                        /*
                         * We are already mapped with different flags.
@@ -992,7 +1209,7 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
        }
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+       ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                if (atomic_add_negative(-1, &bo->mapped))
@@ -1002,33 +1219,33 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
                drm_bo_fill_rep_arg(bo, rep);
       out:
        mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(dev, bo);
+       drm_bo_usage_deref_unlocked(&bo);
        return ret;
 }
 
-static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
+static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
 {
-       drm_device_t *dev = priv->head->dev;
-       drm_buffer_object_t *bo;
-       drm_ref_object_t *ro;
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_buffer_object *bo;
+       struct drm_ref_object *ro;
        int ret = 0;
 
        mutex_lock(&dev->struct_mutex);
 
-       bo = drm_lookup_buffer_object(priv, handle, 1);
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
        if (!bo) {
                ret = -EINVAL;
                goto out;
        }
 
-       ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+       ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
        if (!ro) {
                ret = -EINVAL;
                goto out;
        }
 
-       drm_remove_ref_object(priv, ro);
-       drm_bo_usage_deref_locked(dev, bo);
+       drm_remove_ref_object(file_priv, ro);
+       drm_bo_usage_deref_locked(&bo);
       out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -1038,12 +1255,12 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
  * Call struct-sem locked.
  */
 
-static void drm_buffer_user_object_unmap(drm_file_t * priv,
-                                        drm_user_object_t * uo,
-                                        drm_ref_t action)
+static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
+                                        struct drm_user_object * uo,
+                                        enum drm_ref_type action)
 {
-       drm_buffer_object_t *bo =
-           drm_user_object_entry(uo, drm_buffer_object_t, base);
+       struct drm_buffer_object *bo =
+           drm_user_object_entry(uo, struct drm_buffer_object, base);
 
        /*
         * We DON'T want to take the bo->lock here, because we want to
@@ -1057,163 +1274,227 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv,
 }
 
 /*
- * bo->mutex locked. 
+ * bo->mutex locked.
+ * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
  */
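
In outline, the rewritten move path below is (a summary of the code that follows, not new semantics):

	/*
	 * drm_bo_move_buffer():
	 *   1. drm_bo_busy() / drm_bo_wait()  - flush and wait for fences
	 *   2. move bo to bm->unfenced and set _DRM_BO_FLAG_UNFENCED
	 *   3. drm_bo_mem_space()             - pick a placement (evict_mutex held)
	 *   4. drm_bo_handle_move_mem()       - perform the move
	 *   5. on error, or if !move_unfenced - drop the node, clear UNFENCED,
	 *                                       and return bo to its lru
	 */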
 
-static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
-                             int no_wait, int force_no_move)
+int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
+                      int no_wait, int move_unfenced)
 {
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
        int ret = 0;
-
+       struct drm_bo_mem_reg mem;
+
        /*
         * Flush outstanding fences.
         */
+
        drm_bo_busy(bo);
 
        /*
-        * Make sure we're not mapped.
+        * Wait for outstanding fences.
         */
 
-       ret = drm_bo_wait_unmapped(bo, no_wait);
+       ret = drm_bo_wait(bo, 0, 0, no_wait);
        if (ret)
                return ret;
 
-       /*
-        * Wait for outstanding fences.
-        */
+       mem.num_pages = bo->num_pages;
+       mem.size = mem.num_pages << PAGE_SHIFT;
+       mem.mask = new_mem_flags;
+       mem.page_alignment = bo->mem.page_alignment;
 
-       ret = drm_bo_wait(bo, 0, 0, no_wait);
+       mutex_lock(&bm->evict_mutex);
+       mutex_lock(&dev->struct_mutex);
+       list_del(&bo->lru);
+       list_add_tail(&bo->lru, &bm->unfenced);
+       DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+                       _DRM_BO_FLAG_UNFENCED);
+       mutex_unlock(&dev->struct_mutex);
 
-       if (ret == -EINTR)
-               return -EAGAIN;
+       /*
+        * Determine where to move the buffer.
+        */
+       ret = drm_bo_mem_space(bo, &mem, no_wait);
        if (ret)
-               return ret;
+               goto out_unlock;
 
-       if (new_flags & DRM_BO_FLAG_MEM_TT) {
-               ret = drm_move_local_to_tt(bo, no_wait);
-               if (ret)
-                       return ret;
-       } else {
-               drm_move_tt_to_local(bo, 0, force_no_move);
+       ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
+
+ out_unlock:
+       if (ret || !move_unfenced) {
+               mutex_lock(&dev->struct_mutex);
+               if (mem.mm_node) {
+                       if (mem.mm_node != bo->pinned_node)
+                               drm_mm_put_block(mem.mm_node);
+                       mem.mm_node = NULL;
+               }
+               DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+               DRM_WAKEUP(&bo->event_queue);
+               list_del(&bo->lru);
+               drm_bo_add_to_lru(bo);
+               mutex_unlock(&dev->struct_mutex);
        }
 
-       return 0;
+       mutex_unlock(&bm->evict_mutex);
+       return ret;
+}
+
+static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
+{
+       uint32_t flag_diff = (mem->mask ^ mem->flags);
+
+       if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
+               return 0;
+       if ((flag_diff & DRM_BO_FLAG_CACHED) &&
+           (mem->mask & DRM_BO_FLAG_FORCE_CACHING))
+               return 0;
+       if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
+           ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
+            (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+               return 0;
+       return 1;
 }
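
A worked example of the compatibility test (the flag values are illustrative):

	/*
	 * bo->mem.flags = MEM_TT | CACHED | MAPPABLE,
	 * mem->mask     = MEM_TT | FORCE_CACHING (uncached requested):
	 * the memory types overlap, but flag_diff has CACHED set while the
	 * mask carries FORCE_CACHING, so drm_bo_mem_compat() returns 0 and
	 * validate must move the buffer. Without FORCE_CACHING the caching
	 * mismatch is tolerated and the current placement is kept.
	 */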
 
 /*
  * bo locked.
  */
 
-static int drm_buffer_object_validate(drm_buffer_object_t * bo,
-                                     uint32_t new_flags,
+static int drm_buffer_object_validate(struct drm_buffer_object * bo,
+                                     uint32_t fence_class,
                                      int move_unfenced, int no_wait)
 {
-       drm_device_t *dev = bo->dev;
-       drm_buffer_manager_t *bm = &dev->bm;
-       uint32_t flag_diff = (new_flags ^ bo->flags);
-       drm_bo_driver_t *driver = dev->driver->bo_driver;
-
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       uint32_t ftype;
        int ret;
 
-       if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
-               DRM_ERROR("Vram support not implemented yet\n");
-               return -EINVAL;
-       }
+       DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
+                 (unsigned long long) bo->mem.mask,
+                 (unsigned long long) bo->mem.flags);
+
+       ret = driver->fence_type(bo, &fence_class, &ftype);
 
-       DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
-       ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
        if (ret) {
                DRM_ERROR("Driver did not support given buffer permissions\n");
                return ret;
        }
 
        /*
-        * Move out if we need to change caching policy.
+        * We're switching command submission mechanism,
+        * or cannot simply rely on the hardware serializing for us.
+        *
+        * Wait for buffer idle.
         */
 
-       if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
-           !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
-               if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
-                       DRM_ERROR("Cannot change caching policy of "
-                                 "pinned buffer.\n");
-                       return -EINVAL;
-               }
-               ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
+       if ((fence_class != bo->fence_class) ||
+           ((ftype ^ bo->fence_type) & bo->fence_type)) {
+               ret = drm_bo_wait(bo, 0, 0, no_wait);
+               if (ret)
+                       return ret;
+       }
+
+       bo->new_fence_class = fence_class;
+       bo->new_fence_type = ftype;
+
+       ret = drm_bo_wait_unmapped(bo, no_wait);
+       if (ret) {
+               DRM_ERROR("Timed out waiting for buffer unmap.\n");
+               return ret;
+       }
+
+       /*
+        * Check whether we need to move buffer.
+        */
+
+       if (!drm_bo_mem_compat(&bo->mem)) {
+               ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+                                        move_unfenced);
                if (ret) {
                        if (ret != -EAGAIN)
                                DRM_ERROR("Failed moving buffer.\n");
                        return ret;
                }
        }
-       DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
-       flag_diff = (new_flags ^ bo->flags);
 
        /*
-        * Check whether we dropped no_move policy, and in that case,
-        * release reserved manager regions.
+        * Pinned buffers.
         */
 
-       if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
-           !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
+       if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+               bo->pinned_mem_type = bo->mem.mem_type;
                mutex_lock(&dev->struct_mutex);
-               if (bo->mm_node) {
-                       drm_mm_put_block(bo->mm_node);
-                       bo->mm_node = NULL;
+               list_del_init(&bo->pinned_lru);
+               drm_bo_add_to_pinned_lru(bo);
+
+               if (bo->pinned_node != bo->mem.mm_node) {
+                       if (bo->pinned_node != NULL)
+                               drm_mm_put_block(bo->pinned_node);
+                       bo->pinned_node = bo->mem.mm_node;
                }
+
+               mutex_unlock(&dev->struct_mutex);
+
+       } else if (bo->pinned_node != NULL) {
+
+               mutex_lock(&dev->struct_mutex);
+
+               if (bo->pinned_node != bo->mem.mm_node)
+                       drm_mm_put_block(bo->pinned_node);
+
+               list_del_init(&bo->pinned_lru);
+               bo->pinned_node = NULL;
                mutex_unlock(&dev->struct_mutex);
+
        }
 
        /*
-        * Check whether we need to move buffer.
+        * We might need to add a TTM.
         */
 
-       if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
-               ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
-               if (ret) {
-                       if (ret != -EAGAIN)
-                               DRM_ERROR("Failed moving buffer.\n");
+       if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
+               ret = drm_bo_add_ttm(bo);
+               if (ret)
                        return ret;
-               }
        }
+       DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
 
-       if (move_unfenced) {
-
-               /*
-                * Place on unfenced list.
-                */
+       /*
+        * Finally, adjust lru to be sure.
+        */
 
+       mutex_lock(&dev->struct_mutex);
+       list_del(&bo->lru);
+       if (move_unfenced) {
+               list_add_tail(&bo->lru, &bm->unfenced);
                DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
                                _DRM_BO_FLAG_UNFENCED);
-               mutex_lock(&dev->struct_mutex);
-               list_del(&bo->lru);
-               list_add_tail(&bo->lru, &bm->unfenced);
-               mutex_unlock(&dev->struct_mutex);
        } else {
-
-               mutex_lock(&dev->struct_mutex);
-               list_del_init(&bo->lru);
-               drm_bo_add_to_lru(bo, bm);
-               mutex_unlock(&dev->struct_mutex);
+               drm_bo_add_to_lru(bo);
+               if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+                       DRM_WAKEUP(&bo->event_queue);
+                       DRM_FLAG_MASKED(bo->priv_flags, 0,
+                                       _DRM_BO_FLAG_UNFENCED);
+               }
        }
+       mutex_unlock(&dev->struct_mutex);
 
-       bo->flags = new_flags;
        return 0;
 }
 
-static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
-                                 uint32_t flags, uint32_t mask, uint32_t hint,
-                                 drm_bo_arg_reply_t * rep)
+int drm_bo_do_validate(struct drm_buffer_object *bo,
+                      uint64_t flags, uint64_t mask, uint32_t hint,
+                      uint32_t fence_class,
+                      int no_wait,
+                      struct drm_bo_info_rep *rep)
 {
-       drm_buffer_object_t *bo;
-       drm_device_t *dev = priv->head->dev;
        int ret;
-       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-       uint32_t new_flags;
-
-       bo = drm_lookup_buffer_object(priv, handle, 1);
-       if (!bo) {
-               return -EINVAL;
-       }
 
        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
@@ -1221,32 +1502,75 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
        if (ret)
                goto out;
 
-       ret = drm_bo_new_flags(dev, bo->flags,
-                              (flags & mask) | (bo->mask & ~mask), hint,
-                              0, &new_flags, &bo->mask);
 
+       DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
+       ret = drm_bo_new_mask(bo, flags, hint);
        if (ret)
                goto out;
 
-       ret =
-           drm_buffer_object_validate(bo, new_flags,
-                                      !(hint & DRM_BO_HINT_DONT_FENCE),
-                                      no_wait);
-       drm_bo_fill_rep_arg(bo, rep);
-
-      out:
+       ret = drm_buffer_object_validate(bo,
+                                        fence_class,
+                                        !(hint & DRM_BO_HINT_DONT_FENCE),
+                                        no_wait);
+out:
+       if (rep)
+               drm_bo_fill_rep_arg(bo, rep);
 
        mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(dev, bo);
        return ret;
 }
+EXPORT_SYMBOL(drm_bo_do_validate);
+
+
+int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
+                          uint32_t fence_class,
+                          uint64_t flags, uint64_t mask, uint32_t hint,
+                          struct drm_bo_info_rep * rep,
+                          struct drm_buffer_object **bo_rep)
+{
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_buffer_object *bo;
+       int ret;
+       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+
+       mutex_lock(&dev->struct_mutex);
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (!bo) {
+               return -EINVAL;
+       }
+
+       /*
+        * Only allow creator to change shared buffer mask.
+        */
+
+       if (bo->base.owner != file_priv)
+               mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
+
+       ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
+                                no_wait, rep);
+
+       if (!ret && bo_rep)
+               *bo_rep = bo;
+       else
+               drm_bo_usage_deref_unlocked(&bo);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_bo_handle_validate);
 
-static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
-                             drm_bo_arg_reply_t * rep)
+static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
+                             struct drm_bo_info_rep *rep)
 {
-       drm_buffer_object_t *bo;
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_buffer_object *bo;
+
+       mutex_lock(&dev->struct_mutex);
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
+       mutex_unlock(&dev->struct_mutex);
 
-       bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                return -EINVAL;
        }
@@ -1255,18 +1579,23 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
                (void)drm_bo_busy(bo);
        drm_bo_fill_rep_arg(bo, rep);
        mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(bo->dev, bo);
+       drm_bo_usage_deref_unlocked(&bo);
        return 0;
 }
 
-static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
-                             uint32_t hint, drm_bo_arg_reply_t * rep)
+static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
+                             uint32_t hint,
+                             struct drm_bo_info_rep *rep)
 {
-       drm_buffer_object_t *bo;
+       struct drm_device *dev = file_priv->head->dev;
+       struct drm_buffer_object *bo;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
        int ret;
 
-       bo = drm_lookup_buffer_object(priv, handle, 1);
+       mutex_lock(&dev->struct_mutex);
+       bo = drm_lookup_buffer_object(file_priv, handle, 1);
+       mutex_unlock(&dev->struct_mutex);
+
        if (!bo) {
                return -EINVAL;
        }
@@ -1283,68 +1612,25 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
 
       out:
        mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(bo->dev, bo);
+       drm_bo_usage_deref_unlocked(&bo);
        return ret;
 }
 
-/*
- * Call bo->mutex locked.
- */
-
-static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
+int drm_buffer_object_create(struct drm_device *dev,
+                            unsigned long size,
+                            enum drm_bo_type type,
+                            uint64_t mask,
+                            uint32_t hint,
+                            uint32_t page_alignment,
+                            unsigned long buffer_start,
+                            struct drm_buffer_object ** buf_obj)
 {
-       drm_device_t *dev = bo->dev;
-       drm_ttm_object_t *to = NULL;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_buffer_object *bo;
        int ret = 0;
-       uint32_t ttm_flags = 0;
-
-       bo->ttm_object = NULL;
-       bo->ttm = NULL;
+       unsigned long num_pages;
 
-       switch (bo->type) {
-       case drm_bo_type_dc:
-               mutex_lock(&dev->struct_mutex);
-               ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
-                                           ttm_flags, &to);
-               mutex_unlock(&dev->struct_mutex);
-               break;
-       case drm_bo_type_user:
-       case drm_bo_type_fake:
-               break;
-       default:
-               DRM_ERROR("Illegal buffer object type\n");
-               ret = -EINVAL;
-               break;
-       }
-
-       if (ret) {
-               return ret;
-       }
-
-       if (to) {
-               bo->ttm_object = to;
-               bo->ttm = drm_ttm_from_object(to);
-       }
-       return ret;
-}
-
-int drm_buffer_object_create(drm_file_t * priv,
-                            unsigned long size,
-                            drm_bo_type_t type,
-                            uint32_t mask,
-                            uint32_t hint,
-                            uint32_t page_alignment,
-                            unsigned long buffer_start,
-                            drm_buffer_object_t ** buf_obj)
-{
-       drm_device_t *dev = priv->head->dev;
-       drm_buffer_manager_t *bm = &dev->bm;
-       drm_buffer_object_t *bo;
-       int ret = 0;
-       uint32_t new_flags;
-       unsigned long num_pages;
-
-       if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
+       if (buffer_start & ~PAGE_MASK) {
                DRM_ERROR("Invalid buffer object start.\n");
                return -EINVAL;
        }
@@ -1366,31 +1652,43 @@ int drm_buffer_object_create(drm_file_t * priv,
        atomic_set(&bo->mapped, -1);
        DRM_INIT_WAITQUEUE(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
+       INIT_LIST_HEAD(&bo->pinned_lru);
        INIT_LIST_HEAD(&bo->ddestroy);
+#ifdef DRM_ODD_MM_COMPAT
+       INIT_LIST_HEAD(&bo->p_mm_list);
+       INIT_LIST_HEAD(&bo->vma_list);
+#endif
        bo->dev = dev;
-       bo->type = type;
+       if (buffer_start != 0)
+               bo->type = drm_bo_type_user;
+       else
+               bo->type = type;
        bo->num_pages = num_pages;
-       bo->mm_node = NULL;
-       bo->page_alignment = page_alignment;
-       if (bo->type == drm_bo_type_fake) {
-               bo->offset = buffer_start;
-               bo->buffer_start = 0;
-       } else {
-               bo->buffer_start = buffer_start;
-       }
+       bo->mem.mem_type = DRM_BO_MEM_LOCAL;
+       bo->mem.num_pages = bo->num_pages;
+       bo->mem.mm_node = NULL;
+       bo->mem.page_alignment = page_alignment;
+       bo->buffer_start = buffer_start;
        bo->priv_flags = 0;
-       bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+       bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+               DRM_BO_FLAG_MAPPABLE;
+       bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+               DRM_BO_FLAG_MAPPABLE;
        atomic_inc(&bm->count);
-       ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
-                              1, &new_flags, &bo->mask);
-       if (ret)
-               goto out_err;
-       ret = drm_bo_add_ttm(priv, bo);
+       ret = drm_bo_new_mask(bo, mask, hint);
+
        if (ret)
                goto out_err;
 
-       ret = drm_buffer_object_validate(bo, new_flags, 0,
-                                        hint & DRM_BO_HINT_DONT_BLOCK);
+       if (bo->type == drm_bo_type_dc) {
+               mutex_lock(&dev->struct_mutex);
+               ret = drm_bo_setup_vm_locked(bo);
+               mutex_unlock(&dev->struct_mutex);
+               if (ret)
+                       goto out_err;
+       }
+
+       ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
        if (ret)
                goto out_err;
 
@@ -1400,18 +1698,21 @@ int drm_buffer_object_create(drm_file_t * priv,
 
       out_err:
        mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(dev, bo);
+
+       drm_bo_usage_deref_unlocked(&bo);
        return ret;
 }
+EXPORT_SYMBOL(drm_buffer_object_create);
+
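
With the create helper exported, a driver can allocate buffers without going through the ioctl; a minimal sketch (example_alloc_scratch and the flag choice are illustrative, not from the patch):

	static int example_alloc_scratch(struct drm_device *dev,
					 struct drm_buffer_object **bo)
	{
		/* created local/cached, then validated into TT and pinned */
		return drm_buffer_object_create(dev, 2 * PAGE_SIZE,
						drm_bo_type_dc,
						DRM_BO_FLAG_MEM_TT |
						DRM_BO_FLAG_READ |
						DRM_BO_FLAG_NO_EVICT,
						DRM_BO_HINT_DONT_BLOCK,
						0 /* page_alignment */,
						0 /* buffer_start */, bo);
	}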
 
-static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
-                                 int shareable)
+static int drm_bo_add_user_object(struct drm_file *file_priv,
+                                 struct drm_buffer_object *bo, int shareable)
 {
-       drm_device_t *dev = priv->head->dev;
+       struct drm_device *dev = file_priv->head->dev;
        int ret;
 
        mutex_lock(&dev->struct_mutex);
-       ret = drm_add_user_object(priv, &bo->base, shareable);
+       ret = drm_add_user_object(file_priv, &bo->base, shareable);
        if (ret)
                goto out;
 
@@ -1425,253 +1726,311 @@ static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
        return ret;
 }
 
-static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
+int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_create_arg *arg = data;
+       struct drm_bo_create_req *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       struct drm_buffer_object *entry;
+       int ret = 0;
+
+       DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
+                 (int)(req->size / 1024), req->page_alignment * 4);
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_buffer_object_create(file_priv->head->dev,
+                                      req->size, drm_bo_type_dc, req->mask,
+                                      req->hint, req->page_alignment,
+                                      req->buffer_start, &entry);
+       if (ret)
+               goto out;
+
+       ret = drm_bo_add_user_object(file_priv, entry,
+                                    req->mask & DRM_BO_FLAG_SHAREABLE);
+       if (ret) {
+               drm_bo_usage_deref_unlocked(&entry);
+               goto out;
+       }
+
+       mutex_lock(&entry->mutex);
+       drm_bo_fill_rep_arg(entry, rep);
+       mutex_unlock(&entry->mutex);
+
+out:
+       return ret;
+}
+
+int drm_bo_setstatus_ioctl(struct drm_device *dev, 
+                          void *data, struct drm_file *file_priv)
 {
-       LOCK_TEST_WITH_RETURN(dev, filp);
+       struct drm_bo_map_wait_idle_arg *arg = data;
+       struct drm_bo_info_req *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       int ret;
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
+                                    req->flags,
+                                    req->mask,
+                                    req->hint | DRM_BO_HINT_DONT_FENCE,
+                                    rep, NULL);
+
+       if (ret)
+               return ret;
+
        return 0;
 }
 
-int drm_bo_ioctl(DRM_IOCTL_ARGS)
+int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
-       DRM_DEVICE;
-       drm_bo_arg_t arg;
-       drm_bo_arg_request_t *req = &arg.d.req;
-       drm_bo_arg_reply_t rep;
-       unsigned long next;
-       drm_user_object_t *uo;
-       drm_buffer_object_t *entry;
+       struct drm_bo_map_wait_idle_arg *arg = data;
+       struct drm_bo_info_req *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       int ret;
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
+                                   req->hint, rep);
+       if (ret)
+               return ret;
 
+       return 0;
+}
+
+int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_handle_arg *arg = data;
+       int ret;
        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }
 
-       do {
-               DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+       ret = drm_buffer_object_unmap(file_priv, arg->handle);
+       return ret;
+}
 
-               if (arg.handled) {
-                       data = arg.next;
-                       continue;
+
+int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_reference_info_arg *arg = data;
+       struct drm_bo_handle_arg *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       struct drm_user_object *uo;
+       int ret;
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_user_object_ref(file_priv, req->handle,
+                                 drm_buffer_type, &uo);
+       if (ret)
+               return ret;
+
+       ret = drm_bo_handle_info(file_priv, req->handle, rep);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_handle_arg *arg = data;
+       int ret = 0;
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
+       return ret;
+}
+
+int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_reference_info_arg *arg = data;
+       struct drm_bo_handle_arg *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       int ret;
+
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_handle_info(file_priv, req->handle, rep);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_map_wait_idle_arg *arg = data;
+       struct drm_bo_info_req *req = &arg->d.req;
+       struct drm_bo_info_rep *rep = &arg->d.rep;
+       int ret;
+       if (!dev->bm.initialized) {
+               DRM_ERROR("Buffer object manager is not initialized.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_bo_handle_wait(file_priv, req->handle,
+                                req->hint, rep);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int drm_bo_leave_list(struct drm_buffer_object * bo,
+                            uint32_t mem_type,
+                            int free_pinned, int allow_errors)
+{
+       struct drm_device *dev = bo->dev;
+       int ret = 0;
+
+       mutex_lock(&bo->mutex);
+
+       ret = drm_bo_expire_fence(bo, allow_errors);
+       if (ret)
+               goto out;
+
+       if (free_pinned) {
+               DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
+               mutex_lock(&dev->struct_mutex);
+               list_del_init(&bo->pinned_lru);
+               if (bo->pinned_node == bo->mem.mm_node)
+                       bo->pinned_node = NULL;
+               if (bo->pinned_node != NULL) {
+                       drm_mm_put_block(bo->pinned_node);
+                       bo->pinned_node = NULL;
                }
+               mutex_unlock(&dev->struct_mutex);
+       }
 
-               rep.ret = 0;
-               switch (req->op) {
-               case drm_bo_create:
-                       rep.ret =
-                           drm_buffer_object_create(priv, req->size,
-                                                    req->type,
-                                                    req->mask,
-                                                    req->hint,
-                                                    req->page_alignment,
-                                                    req->buffer_start, &entry);
-                       if (rep.ret)
-                               break;
-
-                       rep.ret =
-                           drm_bo_add_user_object(priv, entry,
-                                                  req->
-                                                  mask &
-                                                  DRM_BO_FLAG_SHAREABLE);
-                       if (rep.ret)
-                               drm_bo_usage_deref_unlocked(dev, entry);
-
-                       if (rep.ret)
-                               break;
-
-                       mutex_lock(&entry->mutex);
-                       drm_bo_fill_rep_arg(entry, &rep);
-                       mutex_unlock(&entry->mutex);
-                       break;
-               case drm_bo_unmap:
-                       rep.ret = drm_buffer_object_unmap(priv, req->handle);
-                       break;
-               case drm_bo_map:
-                       rep.ret = drm_buffer_object_map(priv, req->handle,
-                                                       req->mask,
-                                                       req->hint, &rep);
-                       break;
-               case drm_bo_destroy:
-                       mutex_lock(&dev->struct_mutex);
-                       uo = drm_lookup_user_object(priv, req->handle);
-                       if (!uo || (uo->type != drm_buffer_type)
-                           || uo->owner != priv) {
-                               mutex_unlock(&dev->struct_mutex);
-                               rep.ret = -EINVAL;
-                               break;
-                       }
-                       rep.ret = drm_remove_user_object(priv, uo);
-                       mutex_unlock(&dev->struct_mutex);
-                       break;
-               case drm_bo_reference:
-                       rep.ret = drm_user_object_ref(priv, req->handle,
-                                                     drm_buffer_type, &uo);
-                       if (rep.ret)
-                               break;
-                       mutex_lock(&dev->struct_mutex);
-                       uo = drm_lookup_user_object(priv, req->handle);
-                       entry =
-                           drm_user_object_entry(uo, drm_buffer_object_t,
-                                                 base);
-                       atomic_dec(&entry->usage);
-                       mutex_unlock(&dev->struct_mutex);
-                       mutex_lock(&entry->mutex);
-                       drm_bo_fill_rep_arg(entry, &rep);
-                       mutex_unlock(&entry->mutex);
-                       break;
-               case drm_bo_unreference:
-                       rep.ret = drm_user_object_unref(priv, req->handle,
-                                                       drm_buffer_type);
-                       break;
-               case drm_bo_validate:
-                       rep.ret = drm_bo_lock_test(dev, filp);
-
-                       if (rep.ret)
-                               break;
-                       rep.ret =
-                           drm_bo_handle_validate(priv, req->handle, req->mask,
-                                                  req->arg_handle, req->hint,
-                                                  &rep);
-                       break;
-               case drm_bo_fence:
-                       rep.ret = drm_bo_lock_test(dev, filp);
-                       if (rep.ret)
-                               break;
-                        /**/ break;
-               case drm_bo_info:
-                       rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
-                       break;
-               case drm_bo_wait_idle:
-                       rep.ret = drm_bo_handle_wait(priv, req->handle,
-                                                    req->hint, &rep);
-                       break;
-               case drm_bo_ref_fence:
-                       rep.ret = -EINVAL;
-                       DRM_ERROR("Function is not implemented yet.\n");
-               default:
-                       rep.ret = -EINVAL;
+       if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
+               DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
+                         "cleanup. Removing flag and evicting.\n");
+               bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
+               bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+       }
+
+       if (bo->mem.mem_type == mem_type)
+               ret = drm_bo_evict(bo, mem_type, 0);
+
+       if (ret) {
+               if (allow_errors) {
+                       goto out;
+               } else {
+                       ret = 0;
+                       DRM_ERROR("Cleanup eviction failed\n");
                }
-               next = arg.next;
+       }
 
-               /*
-                * A signal interrupted us. Make sure the ioctl is restartable.
-                */
+      out:
+       mutex_unlock(&bo->mutex);
+       return ret;
+}
 
-               if (rep.ret == -EAGAIN)
-                       return -EAGAIN;
 
-               arg.handled = 1;
-               arg.d.rep = rep;
-               DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
-               data = next;
-       } while (data);
-       return 0;
+static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
+                                        int pinned_list)
+{
+       if (pinned_list)
+               return list_entry(list, struct drm_buffer_object, pinned_lru);
+       else
+               return list_entry(list, struct drm_buffer_object, lru);
 }
 
 /*
- * dev->struct_sem locked.
+ * dev->struct_mutex locked.
  */
 
-static int drm_bo_force_list_clean(drm_device_t * dev,
+static int drm_bo_force_list_clean(struct drm_device * dev,
                                   struct list_head *head,
                                   unsigned mem_type,
-                                  int force_no_move, int allow_errors)
+                                  int free_pinned,
+                                  int allow_errors,
+                                  int pinned_list)
 {
-       drm_buffer_manager_t *bm = &dev->bm;
        struct list_head *list, *next, *prev;
-       drm_buffer_object_t *entry;
+       struct drm_buffer_object *entry, *nentry;
        int ret;
-       int clean;
+       int do_restart;
+
+       /*
+        * The list traversal is a bit odd here, because an item may
+        * disappear from the list when we release the struct_mutex or
+        * when we decrease the usage count. Also we're not guaranteed
+        * to drain pinned lists, so we can't always restart.
+        */
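+
+       /*
+        * Concretely: "nentry" is pinned with a usage reference before
+        * struct_mutex is dropped, so its list_head stays valid even if
+        * another thread unlinks it; the next->prev checks below detect
+        * a concurrent removal and force a restart from the list head.
+        */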
 
-      retry:
-       clean = 1;
+restart:
+       nentry = NULL;
        list_for_each_safe(list, next, head) {
                prev = list->prev;
-               entry = list_entry(list, drm_buffer_object_t, lru);
+
+               entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
                atomic_inc(&entry->usage);
-               mutex_unlock(&dev->struct_mutex);
-               mutex_lock(&entry->mutex);
-               mutex_lock(&dev->struct_mutex);
+               if (nentry) {
+                       atomic_dec(&nentry->usage);
+                       nentry = NULL;
+               }
 
-               if (prev != list->prev || next != list->next) {
-                       mutex_unlock(&entry->mutex);
-                       drm_bo_usage_deref_locked(dev, entry);
-                       goto retry;
+               /*
+                * Protect the next item from destruction, so we can check
+                * its list pointers later on.
+                */
+
+               if (next != head) {
+                       nentry = drm_bo_entry(next, pinned_list);
+                       atomic_inc(&nentry->usage);
                }
-               if (entry->mm_node) {
-                       clean = 0;
+               mutex_unlock(&dev->struct_mutex);
 
-                       /*
-                        * Expire the fence.
-                        */
+               ret = drm_bo_leave_list(entry, mem_type, free_pinned,
+                                       allow_errors);
+               mutex_lock(&dev->struct_mutex);
 
-                       mutex_unlock(&dev->struct_mutex);
-                       if (entry->fence && bm->nice_mode) {
-                               unsigned long _end = jiffies + 3 * DRM_HZ;
-                               do {
-                                       ret = drm_bo_wait(entry, 0, 1, 0);
-                                       if (ret && allow_errors) {
-                                               if (ret == -EINTR)
-                                                       ret = -EAGAIN;
-                                               goto out_err;
-                                       }
-                               } while (ret && !time_after_eq(jiffies, _end));
-
-                               if (entry->fence) {
-                                       bm->nice_mode = 0;
-                                       DRM_ERROR("Detected GPU hang or "
-                                                 "fence manager was taken down. "
-                                                 "Evicting waiting buffers\n");
-                               }
-                       }
-                       if (entry->fence) {
-                               drm_fence_usage_deref_unlocked(dev,
-                                                              entry->fence);
-                               entry->fence = NULL;
-                       }
+               drm_bo_usage_deref_locked(&entry);
+               if (ret)
+                       return ret;
 
-                       DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
-                                    0);
+               /*
+                * Has the next item disappeared from the list?
+                */
 
-                       if (force_no_move) {
-                               DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
-                                            0);
-                       }
-                       if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
-                               DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
-                                         "cleanup. Removing flag and evicting.\n");
-                               entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
-                               entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
-                       }
+               do_restart = ((next->prev != list) && (next->prev != prev));
 
-                       ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
-                       if (ret) {
-                               if (allow_errors) {
-                                       goto out_err;
-                               } else {
-                                       DRM_ERROR("Aargh. Eviction failed.\n");
-                               }
-                       }
-                       mutex_lock(&dev->struct_mutex);
-               }
-               mutex_unlock(&entry->mutex);
-               drm_bo_usage_deref_locked(dev, entry);
-               if (prev != list->prev || next != list->next) {
-                       goto retry;
-               }
+               if (nentry != NULL && do_restart)
+                       drm_bo_usage_deref_locked(&nentry);
+
+               if (do_restart)
+                       goto restart;
        }
-       if (!clean)
-               goto retry;
        return 0;
-      out_err:
-       mutex_unlock(&entry->mutex);
-       drm_bo_usage_deref_unlocked(dev, entry);
-       mutex_lock(&dev->struct_mutex);
-       return ret;
 }
 
-int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
+int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
 {
-       drm_buffer_manager_t *bm = &dev->bm;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem_type];
        int ret = -EINVAL;
 
        if (mem_type >= DRM_BO_MEM_TYPES) {
@@ -1679,36 +2038,22 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
                return ret;
        }
 
-       if (!bm->has_type[mem_type]) {
+       if (!man->has_type) {
                DRM_ERROR("Trying to take down uninitialized "
-                         "memory manager type\n");
+                         "memory manager type %u\n", mem_type);
                return ret;
        }
-       bm->use_type[mem_type] = 0;
-       bm->has_type[mem_type] = 0;
+       man->use_type = 0;
+       man->has_type = 0;
 
        ret = 0;
        if (mem_type > 0) {
+               BUG_ON(!list_empty(&bm->unfenced));
+               drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
+               drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
 
-               /*
-                * Throw out unfenced buffers.
-                */
-
-               drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);
-
-               /*
-                * Throw out evicted no-move buffers.
-                */
-
-               drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
-                                       mem_type, 1, 0);
-               drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
-                                       0);
-               drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
-                                       0);
-
-               if (drm_mm_clean(&bm->manager[mem_type])) {
-                       drm_mm_takedown(&bm->manager[mem_type]);
+               if (drm_mm_clean(&man->manager)) {
+                       drm_mm_takedown(&man->manager);
                } else {
                        ret = -EBUSY;
                }
@@ -1716,74 +2061,94 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
 
        return ret;
 }
+EXPORT_SYMBOL(drm_bo_clean_mm);
 
-static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
+/**
+ * Evict all buffers of a particular mem_type, but leave memory manager
+ * regions for NO_MOVE buffers intact. New buffers cannot be added at this
+ * point since we have the hardware lock.
+ */
+
+static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
 {
        int ret;
-       drm_buffer_manager_t *bm = &dev->bm;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem_type];
 
        if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
-               DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type);
+               DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
                return -EINVAL;
        }
 
-       ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
-       if (ret)
-               return ret;
-       ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
+       if (!man->has_type) {
+               DRM_ERROR("Memory type %u has not been initialized.\n",
+                         mem_type);
+               return 0;
+       }
+
+       ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
        if (ret)
                return ret;
-       ret =
-           drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
+       ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
+
        return ret;
 }
 
-static int drm_bo_init_mm(drm_device_t * dev,
-                         unsigned type,
-                         unsigned long p_offset, unsigned long p_size)
+int drm_bo_init_mm(struct drm_device * dev,
+                  unsigned type,
+                  unsigned long p_offset, unsigned long p_size)
 {
-       drm_buffer_manager_t *bm = &dev->bm;
+       struct drm_buffer_manager *bm = &dev->bm;
        int ret = -EINVAL;
+       struct drm_mem_type_manager *man;
 
        if (type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory type %d\n", type);
                return ret;
        }
-       if (bm->has_type[type]) {
+
+       man = &bm->man[type];
+       if (man->has_type) {
                DRM_ERROR("Memory manager already initialized for type %d\n",
                          type);
                return ret;
        }
 
+       ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
+       if (ret)
+               return ret;
+
        ret = 0;
        if (type != DRM_BO_MEM_LOCAL) {
                if (!p_size) {
                        DRM_ERROR("Zero size memory manager type %d\n", type);
                        return ret;
                }
-               ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
+               ret = drm_mm_init(&man->manager, p_offset, p_size);
                if (ret)
                        return ret;
        }
-       bm->has_type[type] = 1;
-       bm->use_type[type] = 1;
+       man->has_type = 1;
+       man->use_type = 1;
 
-       INIT_LIST_HEAD(&bm->lru[type]);
-       INIT_LIST_HEAD(&bm->pinned[type]);
+       INIT_LIST_HEAD(&man->lru);
+       INIT_LIST_HEAD(&man->pinned);
 
        return 0;
 }
+EXPORT_SYMBOL(drm_bo_init_mm);
 
 /*
  * This is called from lastclose, so we don't need to bother about
  * any clients still running when we set the initialized flag to zero.
  */
 
-int drm_bo_driver_finish(drm_device_t * dev)
+int drm_bo_driver_finish(struct drm_device * dev)
 {
-       drm_buffer_manager_t *bm = &dev->bm;
+       struct drm_buffer_manager *bm = &dev->bm;
        int ret = 0;
        unsigned i = DRM_BO_MEM_TYPES;
+       struct drm_mem_type_manager *man;
 
        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
@@ -1793,17 +2158,19 @@ int drm_bo_driver_finish(drm_device_t * dev)
        bm->initialized = 0;
 
        while (i--) {
-               if (bm->has_type[i]) {
-                       bm->use_type[i] = 0;
+               man = &bm->man[i];
+               if (man->has_type) {
+                       man->use_type = 0;
                        if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
                                ret = -EBUSY;
                                DRM_ERROR("DRM memory manager type %d "
                                          "is not clean.\n", i);
                        }
-                       bm->has_type[i] = 0;
+                       man->has_type = 0;
                }
        }
        mutex_unlock(&dev->struct_mutex);
+
        if (!cancel_delayed_work(&bm->wq)) {
                flush_scheduled_work();
        }
@@ -1812,10 +2179,10 @@ int drm_bo_driver_finish(drm_device_t * dev)
        if (list_empty(&bm->ddestroy)) {
                DRM_DEBUG("Delayed destroy list was clean\n");
        }
-       if (list_empty(&bm->lru[0])) {
+       if (list_empty(&bm->man[0].lru)) {
                DRM_DEBUG("Swap list was clean\n");
        }
-       if (list_empty(&bm->pinned[0])) {
+       if (list_empty(&bm->man[0].pinned)) {
                DRM_DEBUG("NO_MOVE list was clean\n");
        }
        if (list_empty(&bm->unfenced)) {
@@ -1827,10 +2194,10 @@ int drm_bo_driver_finish(drm_device_t * dev)
        return ret;
 }
 
-int drm_bo_driver_init(drm_device_t * dev)
+int drm_bo_driver_init(struct drm_device * dev)
 {
-       drm_bo_driver_t *driver = dev->driver->bo_driver;
-       drm_buffer_manager_t *bm = &dev->bm;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       struct drm_buffer_manager *bm = &dev->bm;
        int ret = -EINVAL;
 
        mutex_lock(&dev->bm.init_mutex);
@@ -1842,8 +2209,7 @@ int drm_bo_driver_init(drm_device_t * dev)
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
-
-       ret = drm_bo_init_mm(dev, 0, 0, 0);
+       ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
        if (ret)
                goto out_unlock;
 
@@ -1866,80 +2232,286 @@ int drm_bo_driver_init(drm_device_t * dev)
 
 EXPORT_SYMBOL(drm_bo_driver_init);
 
-int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
+int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
-       DRM_DEVICE;
+       struct drm_mm_init_arg *arg = data;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       int ret;
 
-       int ret = 0;
-       drm_mm_init_arg_t arg;
-       drm_buffer_manager_t *bm = &dev->bm;
-       drm_bo_driver_t *driver = dev->driver->bo_driver;
+       if (!driver) {
+               DRM_ERROR("Buffer objects are not supported by this driver\n");
+               return -EINVAL;
+       }
+
+       ret = -EINVAL;
+       if (arg->magic != DRM_BO_INIT_MAGIC) {
+               DRM_ERROR("You are using an old libdrm that is not compatible with\n"
+                         "\tthe kernel DRM module. Please upgrade your libdrm.\n");
+               return -EINVAL;
+       }
+       if (arg->major != DRM_BO_INIT_MAJOR) {
+               DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
+                         "\tversion don't match. Got %d, expected %d,\n",
+                         arg->major, DRM_BO_INIT_MAJOR);
+               return -EINVAL;
+       }
+       if (arg->minor > DRM_BO_INIT_MINOR) {
+               DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
+                         "\tlibdrm buffer object interface version is %d.%d.\n"
+                         "\tkernel DRM buffer object interface version is %d.%d\n",
+                         arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->bm.init_mutex);
+       mutex_lock(&dev->struct_mutex);
+       if (!bm->initialized) {
+               DRM_ERROR("DRM memory manager was not initialized.\n");
+               goto out;
+       }
+       if (arg->mem_type == 0) {
+               DRM_ERROR("System memory buffers already initialized.\n");
+               goto out;
+       }
+       ret = drm_bo_init_mm(dev, arg->mem_type,
+                            arg->p_offset, arg->p_size);
+
+out:
+       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->bm.init_mutex);
+       if (ret)
+               return ret;
+
+       return 0;
+}
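
From the caller's side the handshake looks roughly like this (a sketch; aperture_pages is a placeholder, and the p_offset/p_size units follow whatever drm_bo_init_mm()/drm_mm_init() expect, assumed here to be pages):

	struct drm_mm_init_arg arg = {
		.magic    = DRM_BO_INIT_MAGIC,
		.major    = DRM_BO_INIT_MAJOR,	/* must match the kernel exactly */
		.minor    = DRM_BO_INIT_MINOR,	/* kernel minor must be >= this */
		.mem_type = DRM_BO_MEM_TT,
		.p_offset = 0,
		.p_size   = aperture_pages,
	};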
+
+int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_mm_type_arg *arg = data;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       int ret;
 
        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }
 
-       DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+       mutex_lock(&dev->bm.init_mutex);
+       mutex_lock(&dev->struct_mutex);
+       ret = -EINVAL;
+       if (!bm->initialized) {
+               DRM_ERROR("DRM memory manager was not initialized\n");
+               goto out;
+       }
+       if (arg->mem_type == 0) {
+               DRM_ERROR("No takedown for System memory buffers.\n");
+               goto out;
+       }
+       ret = 0;
+       if (drm_bo_clean_mm(dev, arg->mem_type)) {
+               DRM_ERROR("Memory manager type %d not clean. "
+                         "Delaying takedown\n", arg->mem_type);
+       }
+out:
+       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->bm.init_mutex);
+       if (ret)
+               return ret;
 
-       switch (arg.req.op) {
-       case mm_init:
-               ret = -EINVAL;
-               mutex_lock(&dev->bm.init_mutex);
-               mutex_lock(&dev->struct_mutex);
-               if (!bm->initialized) {
-                       DRM_ERROR("DRM memory manager was not initialized.\n");
-                       break;
-               }
-               if (arg.req.mem_type == 0) {
-                       DRM_ERROR
-                           ("System memory buffers already initialized.\n");
-                       break;
-               }
-               ret = drm_bo_init_mm(dev, arg.req.mem_type,
-                                    arg.req.p_offset, arg.req.p_size);
-               break;
-       case mm_takedown:
-               LOCK_TEST_WITH_RETURN(dev, filp);
-               mutex_lock(&dev->bm.init_mutex);
-               mutex_lock(&dev->struct_mutex);
-               ret = -EINVAL;
-               if (!bm->initialized) {
-                       DRM_ERROR("DRM memory manager was not initialized\n");
-                       break;
-               }
-               if (arg.req.mem_type == 0) {
-                       DRM_ERROR("No takedown for System memory buffers.\n");
-                       break;
-               }
-               ret = 0;
-               if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
-                       DRM_ERROR("Memory manager type %d not clean. "
-                                 "Delaying takedown\n", arg.req.mem_type);
-               }
-               break;
-       case mm_lock:
-               LOCK_TEST_WITH_RETURN(dev, filp);
-               mutex_lock(&dev->bm.init_mutex);
-               mutex_lock(&dev->struct_mutex);
-               ret = drm_bo_lock_mm(dev, arg.req.mem_type);
-               break;
-       case mm_unlock:
-               LOCK_TEST_WITH_RETURN(dev, filp);
-               mutex_lock(&dev->bm.init_mutex);
-               mutex_lock(&dev->struct_mutex);
-               ret = 0;
-               break;
-       default:
-               DRM_ERROR("Function not implemented yet\n");
+       return 0;
+}
+
+int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_mm_type_arg *arg = data;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       int ret;
+
+       if (!driver) {
+               DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }
 
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+       mutex_lock(&dev->bm.init_mutex);
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_bo_lock_mm(dev, arg->mem_type);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        if (ret)
                return ret;
 
-       DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+       return 0;
+}
+
+int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       int ret;
+
+       if (!driver) {
+               DRM_ERROR("Buffer objects are not supported by this driver\n");
+               return -EINVAL;
+       }
+
+       LOCK_TEST_WITH_RETURN(dev, file_priv);
+       mutex_lock(&dev->bm.init_mutex);
+       mutex_lock(&dev->struct_mutex);
+       ret = 0;
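+       /*
+        * No per-type unlock work is done yet; taking the mutexes above
+        * only serializes this ioctl against a concurrent init or takedown.
+        */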
+
+       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->bm.init_mutex);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Buffer object VM functions.
+ */
+
+int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
+
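+       /*
+        * Fixed memory types (e.g. VRAM) always live behind a PCI aperture.
+        * Dynamically bound memory is PCI-accessible only when it is neither
+        * local system memory, CMA-backed, nor mapped cached.
+        */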
+       if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
+               if (mem->mem_type == DRM_BO_MEM_LOCAL)
+                       return 0;
+
+               if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
+                       return 0;
+
+               if (mem->flags & DRM_BO_FLAG_CACHED)
+                       return 0;
+       }
+       return 1;
+}
+
+EXPORT_SYMBOL(drm_mem_reg_is_pci);
+
+/**
+ * Get the PCI offset for the buffer object memory.
+ *
+ * \param dev The DRM device.
+ * \param mem The memory region of the buffer object.
+ * \param bus_base On return the base of the PCI region.
+ * \param bus_offset On return the byte offset into the PCI region.
+ * \param bus_size On return the byte size of the buffer object, or zero if
+ *     the buffer object memory is not accessible through a PCI region.
+ * \return Failure indication.
+ *
+ * Returns -EINVAL if the buffer object is currently not mappable.
+ * Otherwise returns zero.
+ */
+
+int drm_bo_pci_offset(struct drm_device *dev,
+                     struct drm_bo_mem_reg *mem,
+                     unsigned long *bus_base,
+                     unsigned long *bus_offset, unsigned long *bus_size)
+{
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
+
+       *bus_size = 0;
+       if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
+               return -EINVAL;
+
+       if (drm_mem_reg_is_pci(dev, mem)) {
+               *bus_offset = mem->mm_node->start << PAGE_SHIFT;
+               *bus_size = mem->num_pages << PAGE_SHIFT;
+               *bus_base = man->io_offset;
+       }
+
+       return 0;
+}
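+
+/*
+ * Illustrative sketch (an assumption, not part of this patch) of how a
+ * driver could map a PCI-backed region using the offsets returned above:
+ *
+ *     unsigned long bus_base, bus_offset, bus_size;
+ *     void *virtual;
+ *
+ *     ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+ *                             &bus_size);
+ *     if (!ret && bus_size)
+ *             virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
+ */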
+
+/**
+ * Kill all user-space virtual mappings of this buffer object.
+ *
+ * \param bo The buffer object.
+ *
+ * Call with bo->mutex held.
+ */
+
+void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
+{
+       struct drm_device *dev = bo->dev;
+       loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
+       loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+
+       if (!dev->dev_mapping)
+               return;
+
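+       /* Zap every user PTE covering the buffer's fake file-offset range;
+        * even_cows == 1 also removes private COW'ed copies of the pages. */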
+       unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
+}
+
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
+{
+       struct drm_map_list *list = &bo->map_list;
+       drm_local_map_t *map;
+       struct drm_device *dev = bo->dev;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+       if (list->user_token) {
+               drm_ht_remove_item(&dev->map_hash, &list->hash);
+               list->user_token = 0;
+       }
+       if (list->file_offset_node) {
+               drm_mm_put_block(list->file_offset_node);
+               list->file_offset_node = NULL;
+       }
+
+       map = list->map;
+       if (!map)
+               return;
+
+       drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
+       list->map = NULL;
+       list->user_token = 0ULL;
+       drm_bo_usage_deref_locked(&bo);
+}
+
+static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
+{
+       struct drm_map_list *list = &bo->map_list;
+       drm_local_map_t *map;
+       struct drm_device *dev = bo->dev;
+
+       DRM_ASSERT_LOCKED(&dev->struct_mutex);
+       list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
+       if (!list->map)
+               return -ENOMEM;
+
+       map = list->map;
+       map->offset = 0;
+       map->type = _DRM_TTM;
+       map->flags = _DRM_REMOVABLE;
+       map->size = bo->mem.num_pages * PAGE_SIZE;
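+       /* The map holds a usage reference on the buffer object; it is
+        * dropped again in drm_bo_takedown_vm_locked(). */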
+       atomic_inc(&bo->usage);
+       map->handle = (void *)bo;
+
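+       /*
+        * Reserve a range of fake file offsets for this buffer. The hash
+        * entry inserted below lets the mmap path translate a file offset
+        * back into this buffer object.
+        */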
+       list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
+                                                   bo->mem.num_pages, 0, 0);
+
+       if (!list->file_offset_node) {
+               drm_bo_takedown_vm_locked(bo);
+               return -ENOMEM;
+       }
+
+       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+                                                 bo->mem.num_pages, 0);
+
+       list->hash.key = list->file_offset_node->start;
+       if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
+               drm_bo_takedown_vm_locked(bo);
+               return -ENOMEM;
+       }
+
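+       /* user_token is the byte offset user space passes to mmap(). */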
+       list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
        return 0;
 }