Merge tag 'drm-intel-next-2013-07-26-fixed' of git://people.freedesktop.org/~danvet/drm-intel
author    Dave Airlie <airlied@gmail.com>  Wed, 7 Aug 2013 08:09:03 +0000 (18:09 +1000)
committer Dave Airlie <airlied@gmail.com>  Wed, 7 Aug 2013 08:11:35 +0000 (18:11 +1000)
Neat that QA (and Ben) keeps on humming along while I'm on vacation, so
you already get the next feature pull request:
- proper eLLC support for HSW from Ben
- more interrupt refactoring
- add workaround (w/a) tags where we already implement them (Damien)
- hangcheck fixes (Chris) + hangcheck stats (Mika)
- flesh out the new vm structs for ppgtt and ggtt (Ben)
- PSR for Haswell, still disabled by default (Rodrigo et al.)
- pc8+ refclock sequence code from Paulo
- more interrupt refactoring from Paulo, unifying ilk/snb with the ivb/hsw
  interrupt code
- full solution for the Haswell concurrent reg access issues (Chris)
- fix racy object accounting, used by some new leak tests
- fix vsync polarity setting on the ch7xxx dvo encoder
- random bits & pieces, little fixes and better debug output all over

[airlied: fix conflict with drm_mm cleanups]

* tag 'drm-intel-next-2013-07-26-fixed' of git://people.freedesktop.org/~danvet/drm-intel: (289 commits)
  drm/i915: Do not dereference NULL crtc or fb until after checking
  drm/i915: fix pnv display core clock readout
  drm/i915: Replace open-coded offset_in_page()
  drm/i915: Retry DP aux_ch communications with a different clock after failure
  drm/i915: Add messages useful for HPD storm detection debugging (v2)
  drm/i915: dvo_ch7xxx: fix vsync polarity setting
  drm/i915: fix the racy object accounting
  drm/i915: Convert the register access tracepoint to be conditional
  drm/i915: Squash gen lookup through multiple indirections inside GT access
  drm/i915: Use the common register access functions for NOTRACE variants
  drm/i915: Use a private interface for register access within GT
  drm/i915: Colocate all GT access routines in the same file
  drm/i915: fix reference counting in i915_gem_create
  drm/i915: Use Graphics Base of Stolen Memory on all gen3+
  drm/i915: disable stolen mem for OVERLAY_NEEDS_PHYSICAL
  drm/i915: add functions to disable and restore LCPLL
  drm/i915: disable CLKOUT_DP when it's not needed
  drm/i915: extend lpt_enable_clkout_dp
  drm/i915: fix up error cleanup in i915_gem_object_bind_to_gtt
  drm/i915: Add some debug breadcrumbs to connector detection
  ...

103 files changed:
Documentation/DocBook/drm.tmpl
drivers/gpu/drm/Makefile
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/drm_agpsupport.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_scatter.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/drm_vma_manager.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/gem.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i810/i810_drv.h
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/mga/mga_drv.h
drivers/gpu/drm/mga/mga_state.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/nouveau/core/engine/disp/dport.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_gem_helpers.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_dumb.c
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_object.h
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/r128/r128_cce.c
drivers/gpu/drm/r128/r128_drv.h
drivers/gpu/drm/r128/r128_state.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cp.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/savage/savage_bci.c
drivers/gpu/drm/savage/savage_drv.h
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/sis/sis_drv.h
drivers/gpu/drm/sis/sis_mm.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_manager.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/via/via_dma.c
drivers/gpu/drm/via/via_drv.h
drivers/gpu/drm/via/via_mm.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/host1x/drm/drm.c
drivers/gpu/host1x/drm/gem.c
drivers/gpu/host1x/drm/gem.h
drivers/staging/imx-drm/imx-drm-core.c
include/drm/drmP.h
include/drm/drm_gem_cma_helper.h
include/drm/drm_mm.h
include/drm/drm_vma_manager.h [new file with mode: 0644]
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/uapi/drm/drm.h

diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 7d1278e..87e22ec 100644
@@ -2212,6 +2212,12 @@ void intel_crt_init(struct drm_device *dev)
 !Iinclude/drm/drm_rect.h
 !Edrivers/gpu/drm/drm_rect.c
     </sect2>
+    <sect2>
+      <title>VMA Offset Manager</title>
+!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
+!Edrivers/gpu/drm/drm_vma_manager.c
+!Iinclude/drm/drm_vma_manager.h
+    </sect2>
   </sect1>
 
   <!-- Internals: kms properties -->
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 801bcaf..d943b94 100644
@@ -13,7 +13,7 @@ drm-y       :=        drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
                drm_crtc.o drm_modes.o drm_edid.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
                drm_trace_points.o drm_global.o drm_prime.o \
-               drm_rect.o
+               drm_rect.o drm_vma_manager.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index df0d0a0..a144fb0 100644
@@ -216,7 +216,7 @@ static struct drm_driver driver = {
        .gem_free_object = ast_gem_free_object,
        .dumb_create = ast_dumb_create,
        .dumb_map_offset = ast_dumb_mmap_offset,
-       .dumb_destroy = ast_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 
 };
 
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 622d4ae..796dbb2 100644
@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
 extern int ast_dumb_create(struct drm_file *file,
                           struct drm_device *dev,
                           struct drm_mode_create_dumb *args);
-extern int ast_dumb_destroy(struct drm_file *file,
-                           struct drm_device *dev,
-                           uint32_t handle);
 
 extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f60fd7b..7f6152d 100644
@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
        return 0;
 }
 
-int ast_dumb_destroy(struct drm_file *file,
-                    struct drm_device *dev,
-                    uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 int ast_gem_init_object(struct drm_gem_object *obj)
 {
        BUG();
@@ -487,7 +480,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)
 
 static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
 {
-       return bo->bo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 int
 ast_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 8ecb601..d35d99c 100644
@@ -102,7 +102,7 @@ static struct drm_driver driver = {
        .gem_free_object = cirrus_gem_free_object,
        .dumb_create = cirrus_dumb_create,
        .dumb_map_offset = cirrus_dumb_mmap_offset,
-       .dumb_destroy = cirrus_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 };
 
 static struct pci_driver cirrus_pci_driver = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index bae5560..9b0bb91 100644
@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
 int cirrus_dumb_create(struct drm_file *file,
                    struct drm_device *dev,
                       struct drm_mode_create_dumb *args);
-int cirrus_dumb_destroy(struct drm_file *file,
-                    struct drm_device *dev,
-                       uint32_t handle);
 
 int cirrus_framebuffer_init(struct drm_device *dev,
                           struct cirrus_framebuffer *gfb,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 35cbae8..f130a53 100644
@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
        return 0;
 }
 
-int cirrus_dumb_destroy(struct drm_file *file,
-                    struct drm_device *dev,
-                    uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 int cirrus_gem_init_object(struct drm_gem_object *obj)
 {
        BUG();
@@ -294,7 +287,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)
 
 static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
 {
-       return bo->bo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 
 int
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d8fed1..e301d65 100644
@@ -424,6 +424,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 }
 
 /**
+ * drm_agp_clear - Clear AGP resource list
+ * @dev: DRM device
+ *
+ * Iterate over all AGP resources and remove them, but keep the AGP head itself
+ * intact so it can still be used. It is safe to call this if AGP is disabled
+ * or was already removed.
+ *
+ * If DRIVER_MODESET is active, this does nothing, so that the modesetting
+ * resources are not destroyed behind the driver's back. Drivers are
+ * responsible for cleaning them up during device shutdown.
+ */
+void drm_agp_clear(struct drm_device *dev)
+{
+       struct drm_agp_mem *entry, *tempe;
+
+       if (!drm_core_has_AGP(dev) || !dev->agp)
+               return;
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+               if (entry->bound)
+                       drm_unbind_agp(entry->memory);
+               drm_free_agp(entry->memory, entry->pages);
+               kfree(entry);
+       }
+       INIT_LIST_HEAD(&dev->agp->memory);
+
+       if (dev->agp->acquired)
+               drm_agp_release(dev);
+
+       dev->agp->acquired = 0;
+       dev->agp->enabled = 0;
+}
+
+/**
+ * drm_agp_destroy - Destroy AGP head
+ * @agp: AGP head to destroy
+ *
+ * Destroy an AGP head that was previously allocated via drm_agp_init(). The
+ * caller must clean up all AGP resources before calling this; see
+ * drm_agp_clear().
+ */
+void drm_agp_destroy(struct drm_agp_head *agp)
+{
+       kfree(agp);
+}
+
+/**
  * Binds a collection of pages into AGP memory at the given offset, returning
  * the AGP memory structure containing them.
  *
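
The point of splitting drm_agp_clear() off from destruction shows up in the
callers later in this merge: drm_lastclose() now just calls drm_agp_clear(),
while the PCI bus layer frees the head for good via the new agp_destroy hook.
A minimal sketch of the resulting teardown ordering, condensed from the
drm_drv.c and drm_pci.c hunks below (error handling omitted):

	/* On last close: release AGP memory but keep the head usable. */
	drm_agp_clear(dev);

	/* On device teardown (cf. drm_pci_agp_destroy() below): */
	if (drm_core_has_AGP(dev) && dev->agp) {
		drm_agp_clear(dev);
		drm_agp_destroy(dev->agp);
		dev->agp = NULL;
	}
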
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 5a4dbb4..bef4abf 100644
@@ -243,7 +243,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                }
                map->handle = vmalloc_user(map->size);
                DRM_DEBUG("%lu %d %p\n",
-                         map->size, drm_order(map->size), map->handle);
+                         map->size, order_base_2(map->size), map->handle);
                if (!map->handle) {
                        kfree(map);
                        return -ENOMEM;
@@ -630,7 +630,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
                return -EINVAL;
 
        count = request->count;
-       order = drm_order(request->size);
+       order = order_base_2(request->size);
        size = 1 << order;
 
        alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -800,7 +800,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
                return -EPERM;
 
        count = request->count;
-       order = drm_order(request->size);
+       order = order_base_2(request->size);
        size = 1 << order;
 
        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
@@ -1002,7 +1002,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
                return -EPERM;
 
        count = request->count;
-       order = drm_order(request->size);
+       order = order_base_2(request->size);
        size = 1 << order;
 
        alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1157,7 +1157,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
                return -EPERM;
 
        count = request->count;
-       order = drm_order(request->size);
+       order = order_base_2(request->size);
        size = 1 << order;
 
        alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1435,7 +1435,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
 
        DRM_DEBUG("%d, %d, %d\n",
                  request->size, request->low_mark, request->high_mark);
-       order = drm_order(request->size);
+       order = order_base_2(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        entry = &dma->bufs[order];
@@ -1600,25 +1600,16 @@ int drm_mapbufs(struct drm_device *dev, void *data,
        return retcode;
 }
 
-/**
- * Compute size order.  Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
 {
-       int order;
-       unsigned long tmp;
-
-       for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
-
-       if (size & (size - 1))
-               ++order;
+       struct drm_map_list *entry;
 
-       return order;
+       list_for_each_entry(entry, &dev->maplist, head) {
+               if (entry->map && entry->map->type == _DRM_SHM &&
+                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+                       return entry->map;
+               }
+       }
+       return NULL;
 }
-EXPORT_SYMBOL(drm_order);
+EXPORT_SYMBOL(drm_getsarea);
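
The removed drm_order() is replaced by order_base_2() from <linux/log2.h>,
which computes the same thing for the buffer sizes used here: the exponent of
the smallest power of two greater than or equal to its argument. A quick
illustration (example values, not taken from this patch):

	#include <linux/log2.h>

	order_base_2(4096);	/* 12 -- already a power of two */
	order_base_2(4097);	/* 13 -- rounded up to 8192 */
	order_base_2(1);	/*  0 */
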
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 725968d..224ff96 100644
@@ -251,7 +251,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
                                       struct drm_file *file_priv, int new)
 {
        dev->last_context = new;        /* PRE/POST: This is the _only_ writer. */
-       dev->last_switch = jiffies;
 
        if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
                DRM_ERROR("Lock isn't held after context switch\n");
@@ -261,7 +260,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
           when the kernel holds the lock, release
           that lock here. */
        clear_bit(0, &dev->context_flag);
-       wake_up(&dev->context_wait);
 
        return 0;
 }
@@ -342,12 +340,6 @@ int drm_addctx(struct drm_device *dev, void *data,
        return 0;
 }
 
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       /* This does nothing */
-       return 0;
-}
-
 /**
  * Get context.
  *
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 99fcd7c..dddd799 100644
@@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -122,7 +122,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 #endif
 
-       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
@@ -195,27 +195,8 @@ int drm_lastclose(struct drm_device * dev)
 
        mutex_lock(&dev->struct_mutex);
 
-       /* Clear AGP information */
-       if (drm_core_has_AGP(dev) && dev->agp &&
-                       !drm_core_check_feature(dev, DRIVER_MODESET)) {
-               struct drm_agp_mem *entry, *tempe;
-
-               /* Remove AGP resources, but leave dev->agp
-                  intact until drv_cleanup is called. */
-               list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
-                       if (entry->bound)
-                               drm_unbind_agp(entry->memory);
-                       drm_free_agp(entry->memory, entry->pages);
-                       kfree(entry);
-               }
-               INIT_LIST_HEAD(&dev->agp->memory);
-
-               if (dev->agp->acquired)
-                       drm_agp_release(dev);
+       drm_agp_clear(dev);
 
-               dev->agp->acquired = 0;
-               dev->agp->enabled = 0;
-       }
        if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
            !drm_core_check_feature(dev, DRIVER_MODESET)) {
                drm_sg_cleanup(dev->sg);
@@ -485,19 +466,4 @@ long drm_ioctl(struct file *filp,
                DRM_DEBUG("ret = %d\n", retcode);
        return retcode;
 }
-
 EXPORT_SYMBOL(drm_ioctl);
-
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
-{
-       struct drm_map_list *entry;
-
-       list_for_each_entry(entry, &dev->maplist, head) {
-               if (entry->map && entry->map->type == _DRM_SHM &&
-                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
-                       return entry->map;
-               }
-       }
-       return NULL;
-}
-EXPORT_SYMBOL(drm_getsarea);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a24385..72acae9 100644
@@ -76,20 +76,10 @@ static int drm_setup(struct drm_device * dev)
        dev->sigdata.lock = NULL;
 
        dev->context_flag = 0;
-       dev->interrupt_flag = 0;
-       dev->dma_flag = 0;
        dev->last_context = 0;
-       dev->last_switch = 0;
-       dev->last_checked = 0;
-       init_waitqueue_head(&dev->context_wait);
        dev->if_version = 0;
 
-       dev->ctx_start = 0;
-       dev->lck_start = 0;
-
        dev->buf_async = NULL;
-       init_waitqueue_head(&dev->buf_readers);
-       init_waitqueue_head(&dev->buf_writers);
 
        DRM_DEBUG("\n");
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 603f256..9ab038c 100644
@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 
 /** @file drm_gem.c
  *
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
        }
 
        dev->mm_private = mm;
-
-       if (drm_ht_create(&mm->offset_hash, 12)) {
-               kfree(mm);
-               return -ENOMEM;
-       }
-
-       drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-                   DRM_FILE_PAGE_OFFSET_SIZE);
+       drm_vma_offset_manager_init(&mm->vma_manager,
+                                   DRM_FILE_PAGE_OFFSET_START,
+                                   DRM_FILE_PAGE_OFFSET_SIZE);
 
        return 0;
 }
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
 {
        struct drm_gem_mm *mm = dev->mm_private;
 
-       drm_mm_takedown(&mm->offset_manager);
-       drm_ht_remove(&mm->offset_hash);
+       drm_vma_offset_manager_destroy(&mm->vma_manager);
        kfree(mm);
        dev->mm_private = NULL;
 }
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
 int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
 {
-       BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+       struct file *filp;
 
-       obj->dev = dev;
-       obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
-       if (IS_ERR(obj->filp))
-               return PTR_ERR(obj->filp);
+       filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+       if (IS_ERR(filp))
+               return PTR_ERR(filp);
 
-       kref_init(&obj->refcount);
-       atomic_set(&obj->handle_count, 0);
-       obj->size = size;
+       drm_gem_private_object_init(dev, obj, size);
+       obj->filp = filp;
 
        return 0;
 }
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
  * no GEM provided backing store. Instead the caller is responsible for
  * backing the object and handling it.
  */
-int drm_gem_private_object_init(struct drm_device *dev,
-                       struct drm_gem_object *obj, size_t size)
+void drm_gem_private_object_init(struct drm_device *dev,
+                                struct drm_gem_object *obj, size_t size)
 {
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
@@ -163,8 +156,6 @@ int drm_gem_private_object_init(struct drm_device *dev,
        kref_init(&obj->refcount);
        atomic_set(&obj->handle_count, 0);
        obj->size = size;
-
-       return 0;
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
@@ -253,6 +244,20 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 EXPORT_SYMBOL(drm_gem_handle_delete);
 
 /**
+ * drm_gem_dumb_destroy - dumb framebuffer callback helper for GEM-based drivers
+ *
+ * This implements the ->dumb_destroy KMS driver callback for drivers which use
+ * GEM to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+                        struct drm_device *dev,
+                        uint32_t handle)
+{
+       return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
  * Create a handle for this object. This adds a handle reference
  * to the object, which includes a regular reference count. Callers
  * will likely want to dereference the object afterwards.
@@ -306,12 +311,8 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list = &obj->map_list;
 
-       drm_ht_remove_item(&mm->offset_hash, &list->hash);
-       drm_mm_put_block(list->file_offset_node);
-       kfree(list->map);
-       list->map = NULL;
+       drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
@@ -331,54 +332,9 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list;
-       struct drm_local_map *map;
-       int ret;
 
-       /* Set the object up for mmap'ing */
-       list = &obj->map_list;
-       list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-       if (!list->map)
-               return -ENOMEM;
-
-       map = list->map;
-       map->type = _DRM_GEM;
-       map->size = obj->size;
-       map->handle = obj;
-
-       /* Get a DRM GEM mmap offset allocated... */
-       list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-                       obj->size / PAGE_SIZE, 0, false);
-
-       if (!list->file_offset_node) {
-               DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-               ret = -ENOSPC;
-               goto out_free_list;
-       }
-
-       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-                       obj->size / PAGE_SIZE, 0);
-       if (!list->file_offset_node) {
-               ret = -ENOMEM;
-               goto out_free_list;
-       }
-
-       list->hash.key = list->file_offset_node->start;
-       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-       if (ret) {
-               DRM_ERROR("failed to add to map hash\n");
-               goto out_free_mm;
-       }
-
-       return 0;
-
-out_free_mm:
-       drm_mm_put_block(list->file_offset_node);
-out_free_list:
-       kfree(list->map);
-       list->map = NULL;
-
-       return ret;
+       return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+                                 obj->size / PAGE_SIZE);
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
@@ -707,8 +663,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_local_map *map = NULL;
-       struct drm_hash_item *hash;
+       struct drm_gem_object *obj;
+       struct drm_vma_offset_node *node;
        int ret = 0;
 
        if (drm_device_is_unplugged(dev))
@@ -716,21 +672,16 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
        mutex_lock(&dev->struct_mutex);
 
-       if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+       node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+                                          vma_pages(vma));
+       if (!node) {
                mutex_unlock(&dev->struct_mutex);
                return drm_mmap(filp, vma);
        }
 
-       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
-       if (!map ||
-           ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
-               ret =  -EPERM;
-               goto out_unlock;
-       }
-
-       ret = drm_gem_mmap_obj(map->handle, map->size, vma);
+       obj = container_of(node, struct drm_gem_object, vma_node);
+       ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
 
-out_unlock:
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
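
With the offset hash table gone, the whole GEM mmap-offset round trip reduces
to the vma-manager calls used above. Roughly, the flow introduced by this
conversion is (condensed from the hunks above; locking omitted):

	/* dumb_map_offset: publish a fake offset for the object ... */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		return ret;
	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	/* ... which drm_gem_mmap() later resolves back to the object: */
	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
					   vma_pages(vma));
	obj = container_of(node, struct drm_gem_object, vma_node);
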
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index ece72a8..0a4f805 100644
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/drm_gem_cma_helper.h>
-
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
-{
-       return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
-}
+#include <drm/drm_vma_manager.h>
 
 /*
  * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 {
        struct drm_gem_cma_object *cma_obj;
 
-       if (gem_obj->map_list.map)
-               drm_gem_free_mmap_offset(gem_obj);
+       drm_gem_free_mmap_offset(gem_obj);
 
        cma_obj = to_drm_gem_cma_obj(gem_obj);
 
@@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
                return -EINVAL;
        }
 
-       *offset = get_gem_mmap_offset(gem_obj);
+       *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 
        drm_gem_object_unreference(gem_obj);
 
@@ -286,27 +281,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
 
-/*
- * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
- */
-int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
-               struct drm_device *drm, unsigned int handle)
-{
-       return drm_gem_handle_delete(file_priv, handle);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
-
 #ifdef CONFIG_DEBUG_FS
 void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
 {
        struct drm_gem_object *obj = &cma_obj->base;
        struct drm_device *dev = obj->dev;
-       uint64_t off = 0;
+       uint64_t off;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       if (obj->map_list.map)
-               off = (uint64_t)obj->map_list.hash.key;
+       off = drm_vma_node_start(&obj->vma_node);
 
        seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
                        obj->name, obj->refcount.refcount.counter,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index fe304f9..aded1e1 100644
 
 #define MM_UNUSED_TARGET 4
 
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
-       struct drm_mm_node *child;
-
-       if (atomic)
-               child = kzalloc(sizeof(*child), GFP_ATOMIC);
-       else
-               child = kzalloc(sizeof(*child), GFP_KERNEL);
-
-       if (unlikely(child == NULL)) {
-               spin_lock(&mm->unused_lock);
-               if (list_empty(&mm->unused_nodes))
-                       child = NULL;
-               else {
-                       child =
-                           list_entry(mm->unused_nodes.next,
-                                      struct drm_mm_node, node_list);
-                       list_del(&child->node_list);
-                       --mm->num_unused;
-               }
-               spin_unlock(&mm->unused_lock);
-       }
-       return child;
-}
-
-/* drm_mm_pre_get() - pre allocate drm_mm_node structure
- * drm_mm:     memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
-       struct drm_mm_node *node;
-
-       spin_lock(&mm->unused_lock);
-       while (mm->num_unused < MM_UNUSED_TARGET) {
-               spin_unlock(&mm->unused_lock);
-               node = kzalloc(sizeof(*node), GFP_KERNEL);
-               spin_lock(&mm->unused_lock);
-
-               if (unlikely(node == NULL)) {
-                       int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
-                       spin_unlock(&mm->unused_lock);
-                       return ret;
-               }
-               ++mm->num_unused;
-               list_add_tail(&node->node_list, &mm->unused_nodes);
-       }
-       spin_unlock(&mm->unused_lock);
-       return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long color,
+                                               enum drm_mm_search_flags flags);
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long color,
+                                               unsigned long start,
+                                               unsigned long end,
+                                               enum drm_mm_search_flags flags);
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
@@ -187,24 +147,6 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
-                                            unsigned long size,
-                                            unsigned alignment,
-                                            unsigned long color,
-                                            int atomic)
-{
-       struct drm_mm_node *node;
-
-       node = drm_mm_kmalloc(hole_node->mm, atomic);
-       if (unlikely(node == NULL))
-               return NULL;
-
-       drm_mm_insert_helper(hole_node, node, size, alignment, color);
-
-       return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
-
 /**
  * Search for free space and insert a preallocated memory node. Returns
  * -ENOSPC if no suitable free area is available. The preallocated memory node
@@ -212,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
  */
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
-                              unsigned long color)
+                              unsigned long color,
+                              enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *hole_node;
 
        hole_node = drm_mm_search_free_generic(mm, size, alignment,
-                                              color, 0);
+                                              color, flags);
        if (!hole_node)
                return -ENOSPC;
 
@@ -226,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 }
 EXPORT_SYMBOL(drm_mm_insert_node_generic);
 
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-                      unsigned long size, unsigned alignment)
-{
-       return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
-}
-EXPORT_SYMBOL(drm_mm_insert_node);
-
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
@@ -285,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
        }
 }
 
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long color,
-                                               unsigned long start,
-                                               unsigned long end,
-                                               int atomic)
-{
-       struct drm_mm_node *node;
-
-       node = drm_mm_kmalloc(hole_node->mm, atomic);
-       if (unlikely(node == NULL))
-               return NULL;
-
-       drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
-                                  start, end);
-
-       return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
 /**
  * Search for free space and insert a preallocated memory node. Returns
  * -ENOSPC if no suitable free area is available. This is for range
@@ -313,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment, unsigned long color,
-                                       unsigned long start, unsigned long end)
+                                       unsigned long start, unsigned long end,
+                                       enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *hole_node;
 
        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
-                                                       start, end, 0);
+                                                       start, end, flags);
        if (!hole_node)
                return -ENOSPC;
 
@@ -330,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
-                               unsigned long size, unsigned alignment,
-                               unsigned long start, unsigned long end)
-{
-       return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
-}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-
 /**
  * Remove a memory node from the allocator.
  */
@@ -372,28 +280,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_remove_node);
 
-/*
- * Remove a memory node from the allocator and free the allocated struct
- * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
- * drm_mm_get_block functions.
- */
-void drm_mm_put_block(struct drm_mm_node *node)
-{
-
-       struct drm_mm *mm = node->mm;
-
-       drm_mm_remove_node(node);
-
-       spin_lock(&mm->unused_lock);
-       if (mm->num_unused < MM_UNUSED_TARGET) {
-               list_add(&node->node_list, &mm->unused_nodes);
-               ++mm->num_unused;
-       } else
-               kfree(node);
-       spin_unlock(&mm->unused_lock);
-}
-EXPORT_SYMBOL(drm_mm_put_block);
-
 static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
 {
@@ -409,11 +295,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
        return end >= start + size;
 }
 
-struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-                                              unsigned long size,
-                                              unsigned alignment,
-                                              unsigned long color,
-                                              bool best_match)
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+                                                     unsigned long size,
+                                                     unsigned alignment,
+                                                     unsigned long color,
+                                                     enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
@@ -436,7 +322,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;
 
-               if (!best_match)
+               if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;
 
                if (entry->size < best_size) {
@@ -447,15 +333,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 
        return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free_generic);
 
-struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
-                                                       bool best_match)
+                                                       enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
@@ -483,7 +368,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;
 
-               if (!best_match)
+               if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;
 
                if (entry->size < best_size) {
@@ -494,7 +379,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 
        return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
 
 /**
  * Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -629,8 +513,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
  * corrupted.
  *
  * When the scan list is empty, the selected memory nodes can be freed. An
- * immediately following drm_mm_search_free with best_match = 0 will then return
- * the just freed block (because its at the top of the free_stack list).
+ * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
+ * return the just-freed block (because it's at the top of the free_stack list).
  *
  * Returns one if this block should be evicted, zero otherwise. Will always
  * return zero when no hole has been found.
@@ -667,10 +551,7 @@ EXPORT_SYMBOL(drm_mm_clean);
 void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
        INIT_LIST_HEAD(&mm->hole_stack);
-       INIT_LIST_HEAD(&mm->unused_nodes);
-       mm->num_unused = 0;
        mm->scanned_blocks = 0;
-       spin_lock_init(&mm->unused_lock);
 
        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -690,22 +571,8 @@ EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
-       struct drm_mm_node *entry, *next;
-
-       if (WARN(!list_empty(&mm->head_node.node_list),
-                "Memory manager not clean. Delaying takedown\n")) {
-               return;
-       }
-
-       spin_lock(&mm->unused_lock);
-       list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
-               list_del(&entry->node_list);
-               kfree(entry);
-               --mm->num_unused;
-       }
-       spin_unlock(&mm->unused_lock);
-
-       BUG_ON(mm->num_unused != 0);
+       WARN(!list_empty(&mm->head_node.node_list),
+            "Memory manager not clean during takedown.\n");
 }
 EXPORT_SYMBOL(drm_mm_takedown);
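
Callers that previously picked between drm_mm_insert_node() and the
best-match search variants now pass an explicit search flag instead. A short
sketch of the new calling convention (mm, node and size are placeholders):

	/* first-fit, the old default behaviour */
	ret = drm_mm_insert_node_generic(&mm, &node, size, 0, 0,
					 DRM_MM_SEARCH_DEFAULT);

	/* best-fit, the old best_match = true behaviour */
	ret = drm_mm_insert_node_generic(&mm, &node, size, 0, 0,
					 DRM_MM_SEARCH_BEST);
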
 
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 80c0b2b..0f54ad8 100644
 drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
 {
        drm_dma_handle_t *dmah;
-#if 1
        unsigned long addr;
        size_t sz;
-#endif
 
        /* pci_alloc_consistent only guarantees alignment to the smallest
         * PAGE_SIZE order which is greater than or equal to the requested size.
@@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
  */
 void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 {
-#if 1
        unsigned long addr;
        size_t sz;
-#endif
 
        if (dmah->vaddr) {
                /* XXX - Is virt_to_page() legal for consistent mem? */
@@ -287,6 +283,17 @@ static int drm_pci_agp_init(struct drm_device *dev)
        return 0;
 }
 
+static void drm_pci_agp_destroy(struct drm_device *dev)
+{
+       if (drm_core_has_AGP(dev) && dev->agp) {
+               if (drm_core_has_MTRR(dev))
+                       arch_phys_wc_del(dev->agp->agp_mtrr);
+               drm_agp_clear(dev);
+               drm_agp_destroy(dev->agp);
+               dev->agp = NULL;
+       }
+}
+
 static struct drm_bus drm_pci_bus = {
        .bus_type = DRIVER_BUS_PCI,
        .get_irq = drm_pci_get_irq,
@@ -295,6 +302,7 @@ static struct drm_bus drm_pci_bus = {
        .set_unique = drm_pci_set_unique,
        .irq_by_busid = drm_pci_irq_by_busid,
        .agp_init = drm_pci_agp_init,
+       .agp_destroy = drm_pci_agp_destroy,
 };
 
 /**
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d87f60b..a4a076f 100644
@@ -70,8 +70,10 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
 # define ScatterHandle(x) (unsigned int)(x)
 #endif
 
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+int drm_sg_alloc(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
 {
+       struct drm_scatter_gather *request = data;
        struct drm_sg_mem *entry;
        unsigned long pages, i, j;
 
@@ -181,15 +183,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
        return -ENOMEM;
 }
 
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv)
-{
-       struct drm_scatter_gather *request = data;
-
-       return drm_sg_alloc(dev, request);
-
-}
-
 int drm_sg_free(struct drm_device *dev, void *data,
                struct drm_file *file_priv)
 {
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 327ca19..d663f7d 100644
@@ -451,16 +451,11 @@ void drm_put_dev(struct drm_device *dev)
 
        drm_lastclose(dev);
 
-       if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
-               arch_phys_wc_del(dev->agp->agp_mtrr);
-
        if (dev->driver->unload)
                dev->driver->unload(dev);
 
-       if (drm_core_has_AGP(dev) && dev->agp) {
-               kfree(dev->agp);
-               dev->agp = NULL;
-       }
+       if (dev->driver->bus->agp_destroy)
+               dev->driver->bus->agp_destroy(dev);
 
        drm_vblank_cleanup(dev);
 
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
new file mode 100644 (file)
index 0000000..3837481
--- /dev/null
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2012 David Airlie <airlied@linux.ie>
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * DOC: vma offset manager
+ *
+ * The vma-manager is responsible for mapping arbitrary driver-dependent memory
+ * regions into the linear user address-space. It provides offsets to the
+ * caller which can then be used on the address_space of the drm-device. It
+ * takes care not to overlap regions, to size them appropriately, and not to
+ * confuse mm-core with inconsistent fake vm_pgoff fields.
+ * Drivers shouldn't use this for object placement in VMEM. This manager should
+ * only be used to manage mappings into linear user-space VMs.
+ *
+ * We use drm_mm as backend to manage object allocations. But it is highly
+ * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
+ * speed up offset lookups.
+ *
+ * You must not use multiple offset managers on a single address_space.
+ * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
+ * no longer be linear. Please use VM_NONLINEAR in that case and implement your
+ * own offset managers.
+ *
+ * This offset manager works on page-based addresses. That is, every argument
+ * and return code (with the exception of drm_vma_node_offset_addr()) is given
+ * in number of pages, not number of bytes. That means, object sizes and offsets
+ * must always be page-aligned (as usual).
+ * If you want to get a valid byte-based user-space address for a given offset,
+ * please see drm_vma_node_offset_addr().
+ */
+
+/**
+ * drm_vma_offset_manager_init - Initialize new offset-manager
+ * @mgr: Manager object
+ * @page_offset: Offset of available memory area (page-based)
+ * @size: Size of available address space range (page-based)
+ *
+ * Initialize a new offset-manager. The offset and area size available for the
+ * manager are given as @page_offset and @size. Both are interpreted as
+ * page-numbers, not bytes.
+ *
+ * Adding/removing nodes from the manager is locked internally and protected
+ * against concurrent access. However, node allocation and destruction is left
+ * to the caller. While calling into the vma-manager, a given node must
+ * always be guaranteed to be referenced.
+ */
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+                                unsigned long page_offset, unsigned long size)
+{
+       rwlock_init(&mgr->vm_lock);
+       mgr->vm_addr_space_rb = RB_ROOT;
+       drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_init);
+
+/**
+ * drm_vma_offset_manager_destroy() - Destroy offset manager
+ * @mgr: Manager object
+ *
+ * Destroy an object manager which was previously created via
+ * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
+ * before destroying the manager. Otherwise, drm_mm will refuse to free the
+ * requested resources.
+ *
+ * The manager must not be accessed after this function is called.
+ */
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
+{
+       /* take the lock to protect against buggy drivers */
+       write_lock(&mgr->vm_lock);
+       drm_mm_takedown(&mgr->vm_addr_space_mm);
+       write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
+
+/**
+ * drm_vma_offset_lookup() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Find a node given a start address and object size. This returns the _best_
+ * match for the given node. That is, @start may point somewhere into a valid
+ * region and the given node will be returned, as long as the node spans the
+ * whole requested area (given the size in number of pages as @pages).
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned. It's the caller's responsibility to make sure the node doesn't
+ * get destroyed before the caller can access it.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+                                                 unsigned long start,
+                                                 unsigned long pages)
+{
+       struct drm_vma_offset_node *node;
+
+       read_lock(&mgr->vm_lock);
+       node = drm_vma_offset_lookup_locked(mgr, start, pages);
+       read_unlock(&mgr->vm_lock);
+
+       return node;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup);
+
+/**
+ * drm_vma_offset_lookup_locked() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
+ * manually. See drm_vma_offset_lock_lookup() for an example.
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+                                                        unsigned long start,
+                                                        unsigned long pages)
+{
+       struct drm_vma_offset_node *node, *best;
+       struct rb_node *iter;
+       unsigned long offset;
+
+       iter = mgr->vm_addr_space_rb.rb_node;
+       best = NULL;
+
+       while (likely(iter)) {
+               node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
+               offset = node->vm_node.start;
+               if (start >= offset) {
+                       iter = iter->rb_right;
+                       best = node;
+                       if (start == offset)
+                               break;
+               } else {
+                       iter = iter->rb_left;
+               }
+       }
+
+       /* verify that the node spans the requested area */
+       if (best) {
+               offset = best->vm_node.start + best->vm_node.size;
+               if (offset < start + pages)
+                       best = NULL;
+       }
+
+       return best;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
+
+/* internal helper to link @node into the rb-tree */
+static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
+                                  struct drm_vma_offset_node *node)
+{
+       struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
+       struct rb_node *parent = NULL;
+       struct drm_vma_offset_node *iter_node;
+
+       while (likely(*iter)) {
+               parent = *iter;
+               iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+
+               if (node->vm_node.start < iter_node->vm_node.start)
+                       iter = &(*iter)->rb_left;
+               else if (node->vm_node.start > iter_node->vm_node.start)
+                       iter = &(*iter)->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&node->vm_rb, parent, iter);
+       rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+}
+
+/**
+ * drm_vma_offset_add() - Add offset node to manager
+ * @mgr: Manager object
+ * @node: Node to be added
+ * @pages: Allocation size visible to user-space (in number of pages)
+ *
+ * Add a node to the offset-manager. If the node was already added, this does
+ * nothing and returns 0. @pages is the size of the object given in number of
+ * pages.
+ * After this call succeeds, you can access the offset of the node until it
+ * is removed again.
+ *
+ * If this call fails, it is safe to retry the operation or to call
+ * drm_vma_offset_remove(); no cleanup is required in that case.
+ *
+ * @pages is not required to be the same size as the underlying memory object
+ * that you want to map. It only limits the size that user-space can map into
+ * their address space.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+                      struct drm_vma_offset_node *node, unsigned long pages)
+{
+       int ret;
+
+       write_lock(&mgr->vm_lock);
+
+       if (drm_mm_node_allocated(&node->vm_node)) {
+               ret = 0;
+               goto out_unlock;
+       }
+
+       ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
+                                pages, 0, DRM_MM_SEARCH_DEFAULT);
+       if (ret)
+               goto out_unlock;
+
+       _drm_vma_offset_add_rb(mgr, node);
+
+out_unlock:
+       write_unlock(&mgr->vm_lock);
+       return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_add);
+
+/**
+ * drm_vma_offset_remove() - Remove offset node from manager
+ * @mgr: Manager object
+ * @node: Node to be removed
+ *
+ * Remove a node from the offset manager. If the node wasn't added before, this
+ * does nothing. After this call returns, the offset and size will be 0 until a
+ * new offset is allocated via drm_vma_offset_add() again. Helper functions like
+ * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
+ * offset is allocated.
+ */
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+                          struct drm_vma_offset_node *node)
+{
+       write_lock(&mgr->vm_lock);
+
+       if (drm_mm_node_allocated(&node->vm_node)) {
+               rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
+               drm_mm_remove_node(&node->vm_node);
+               memset(&node->vm_node, 0, sizeof(node->vm_node));
+       }
+
+       write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_remove);
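
Taken together, the new manager is used by embedding a struct
drm_vma_offset_node in each buffer object. A condensed usage sketch (the
object type and sizes are hypothetical; this mirrors what the GEM and TTM
conversions elsewhere in this merge do):

	struct my_bo {
		struct drm_vma_offset_node vma_node;
		/* ... backing storage ... */
	};

	drm_vma_offset_manager_init(&mgr, DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	/* publish a mappable offset for the object */
	ret = drm_vma_offset_add(&mgr, &bo->vma_node, size >> PAGE_SHIFT);
	offset = drm_vma_node_offset_addr(&bo->vma_node);

	/* resolve vma->vm_pgoff back to the object in the mmap path */
	node = drm_vma_offset_lookup(&mgr, vma->vm_pgoff, vma_pages(vma));

	/* tear down, in this order */
	drm_vma_offset_remove(&mgr, &bo->vma_node);
	drm_vma_offset_manager_destroy(&mgr);
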
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ca2729a..df81d3c 100644
@@ -213,7 +213,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
        .close = drm_gem_vm_close,
 };
 
-static struct drm_ioctl_desc exynos_ioctls[] = {
+static const struct drm_ioctl_desc exynos_ioctls[] = {
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
                        DRM_UNLOCKED | DRM_AUTH),
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
@@ -271,12 +271,13 @@ static struct drm_driver exynos_drm_driver = {
        .gem_vm_ops             = &exynos_drm_gem_vm_ops,
        .dumb_create            = exynos_drm_gem_dumb_create,
        .dumb_map_offset        = exynos_drm_gem_dumb_map_offset,
-       .dumb_destroy           = exynos_drm_gem_dumb_destroy,
+       .dumb_destroy           = drm_gem_dumb_destroy,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = exynos_dmabuf_prime_export,
        .gem_prime_import       = exynos_dmabuf_prime_import,
        .ioctls                 = exynos_ioctls,
+       .num_ioctls             = ARRAY_SIZE(exynos_ioctls),
        .fops                   = &exynos_drm_driver_fops,
        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
@@ -288,7 +289,6 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
 
        return drm_platform_init(&exynos_drm_driver, pdev);
 }
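The exynos hunks above show the two-step cleanup repeated by most drivers in
this series: the ioctl table becomes const, and num_ioctls moves from a
runtime assignment in probe() to a designated initializer computed with
ARRAY_SIZE(). In generic form (MY_FOO and my_foo_ioctl are placeholders,
left commented out since the macro needs a real ioctl definition):

	static const struct drm_ioctl_desc my_ioctls[] = {
		/* DRM_IOCTL_DEF_DRV(MY_FOO, my_foo_ioctl,
		 *		     DRM_UNLOCKED | DRM_AUTH), */
	};

	static struct drm_driver my_driver = {
		.ioctls		= my_ioctls,
		.num_ioctls	= ARRAY_SIZE(my_ioctls),  /* compile time */
		/* ... */
	};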
index 24c22a8..b904633 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 
 #include <linux/shmem_fs.h>
 #include <drm/exynos_drm.h>
@@ -152,8 +153,7 @@ out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;
 
-       if (obj->map_list.map)
-               drm_gem_free_mmap_offset(obj);
+       drm_gem_free_mmap_offset(obj);
 
        /* release file pointer to gem object. */
        drm_gem_object_release(obj);
@@ -703,13 +703,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                goto unlock;
        }
 
-       if (!obj->map_list.map) {
-               ret = drm_gem_create_mmap_offset(obj);
-               if (ret)
-                       goto out;
-       }
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto out;
 
-       *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+       *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
 out:
@@ -719,26 +717,6 @@ unlock:
        return ret;
 }
 
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-                               struct drm_device *dev,
-                               unsigned int handle)
-{
-       int ret;
-
-       /*
-        * obj->refcount and obj->handle_count are decreased and
-        * if both them are 0 then exynos_drm_gem_free_object()
-        * would be called by callback to release resources.
-        */
-       ret = drm_gem_handle_delete(file_priv, handle);
-       if (ret < 0) {
-               DRM_ERROR("failed to delete drm_gem_handle.\n");
-               return ret;
-       }
-
-       return 0;
-}
-
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
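Every per-driver dumb_destroy implementation removed in this series,
including exynos_drm_gem_dumb_destroy() above, was the same thin wrapper, so
the shared drm_gem_dumb_destroy() that replaces them is presumably equivalent
to:

	int drm_gem_dumb_destroy(struct drm_file *file,
				 struct drm_device *dev,
				 uint32_t handle)
	{
		/* Drop the handle; the object is freed once its last
		 * reference goes away. */
		return drm_gem_handle_delete(file, handle);
	}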
index 468766b..09555af 100644 (file)
@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset);
 
-/*
- * destroy memory region allocated.
- *     - a gem handle and physical memory region pointed by a gem object
- *     would be released by drm_gem_handle_delete().
- */
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-                               struct drm_device *dev,
-                               unsigned int handle);
-
 /* page fault handler and mmap fault address(virtual) to physical memory. */
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 
index 8b1b6d9..362dd2a 100644 (file)
@@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
        /* Begin by trying to use stolen memory backing */
        backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
        if (backing) {
-               if (drm_gem_private_object_init(dev,
-                                       &backing->gem, aligned_size) == 0)
-                       return backing;
-               psb_gtt_free_range(dev, backing);
+               drm_gem_private_object_init(dev, &backing->gem, aligned_size);
+               return backing;
        }
        return NULL;
 }
index eefd6cc..10ae8c5 100644 (file)
@@ -26,6 +26,7 @@
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/gma_drm.h>
+#include <drm/drm_vma_manager.h>
 #include "psb_drv.h"
 
 int psb_gem_init_object(struct drm_gem_object *obj)
@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
        struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
 
        /* Remove the list map if one is present */
-       if (obj->map_list.map)
-               drm_gem_free_mmap_offset(obj);
+       drm_gem_free_mmap_offset(obj);
        drm_gem_object_release(obj);
 
        /* This must occur last as it frees up the memory of the GEM object */
@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
        /* What validation is needed here ? */
 
        /* Make it mmapable */
-       if (!obj->map_list.map) {
-               ret = drm_gem_create_mmap_offset(obj);
-               if (ret)
-                       goto out;
-       }
-       /* GEM should really work out the hash offsets for us */
-       *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto out;
+       *offset = drm_vma_node_offset_addr(&obj->vma_node);
 out:
        drm_gem_object_unreference(obj);
 unlock:
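As in the exynos hunk earlier, the !obj->map_list.map guard disappears
because drm_gem_create_mmap_offset() is now idempotent: it bottoms out in
drm_vma_offset_add(), which returns 0 when the node is already installed. A
dumb_map_offset handler therefore reduces to (sketch; object lookup and
locking elided):

	static int my_dumb_map_offset(struct drm_gem_object *obj, u64 *offset)
	{
		int ret;

		/* Safe unconditionally; no-op if an offset already exists. */
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			return ret;

		*offset = drm_vma_node_offset_addr(&obj->vma_node);
		return 0;
	}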
@@ -165,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 
 /**
- *     psb_gem_dumb_destroy    -       destroy a dumb buffer
- *     @file: client file
- *     @dev: our DRM device
- *     @handle: the object handle
- *
- *     Destroy a handle that was created via psb_gem_dumb_create, at least
- *     we hope it was created that way. i915 seems to assume the caller
- *     does the checking but that might be worth review ! FIXME
- */
-int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-                       uint32_t handle)
-{
-       /* No special work needed, drop the reference and see what falls out */
-       return drm_gem_handle_delete(file, handle);
-}
-
-/**
  *     psb_gem_fault           -       pagefault handler for GEM objects
  *     @vma: the VMA of the GEM object
  *     @vmf: fault detail
@@ -261,11 +241,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
        struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
        if (gtt == NULL)
                return -ENOMEM;
-       if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
-               goto free_gtt;
+
+       drm_gem_private_object_init(dev, &gtt->gem, size);
        if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
                return 0;
-free_gtt:
+
+       drm_gem_object_release(&gtt->gem);
        psb_gtt_free_range(dev, gtt);
        return -ENOMEM;
 }
index bddea58..d13c2fc 100644 (file)
@@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
 static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 
-static struct drm_ioctl_desc psb_ioctls[] = {
+static const struct drm_ioctl_desc psb_ioctls[] = {
        DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
                      DRM_AUTH),
@@ -652,7 +652,7 @@ static struct drm_driver driver = {
        .gem_vm_ops = &psb_gem_vm_ops,
        .dumb_create = psb_gem_dumb_create,
        .dumb_map_offset = psb_gem_dumb_map_gtt,
-       .dumb_destroy = psb_gem_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
        .fops = &psb_gem_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
index 6053b8a..984cacf 100644 (file)
@@ -838,8 +838,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
                        struct drm_file *file);
 extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                        struct drm_mode_create_dumb *args);
-extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-                       uint32_t handle);
 extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
                        uint32_t handle, uint64_t *offset);
 extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
index ada49ed..eac755b 100644 (file)
@@ -1241,7 +1241,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
        return 0;
 }
 
-struct drm_ioctl_desc i810_ioctls[] = {
+const struct drm_ioctl_desc i810_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
index 6e0acad..d4d16ed 100644 (file)
@@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev,
 extern int i810_driver_device_is_agp(struct drm_device *dev);
 
 extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i810_ioctls[];
+extern const struct drm_ioctl_desc i810_ioctls[];
 extern int i810_max_ioctl;
 
 #define I810_BASE(reg)         ((unsigned long) \
index f48f1c4..05756f9 100644 (file)
@@ -1837,7 +1837,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
        kfree(file_priv);
 }
 
-struct drm_ioctl_desc i915_ioctls[] = {
+const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
index 01d63a0..13457e3 100644 (file)
@@ -1038,7 +1038,7 @@ static struct drm_driver driver = {
 
        .dumb_create = i915_gem_dumb_create,
        .dumb_map_offset = i915_gem_mmap_gtt,
-       .dumb_destroy = i915_gem_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
        .fops = &i915_driver_fops,
        .name = DRIVER_NAME,
index 82ea281..ab56820 100644 (file)
@@ -1612,7 +1612,7 @@ struct drm_i915_file_private {
 #define INTEL_RC6p_ENABLE                      (1<<1)
 #define INTEL_RC6pp_ENABLE                     (1<<2)
 
-extern struct drm_ioctl_desc i915_ioctls[];
+extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
@@ -1790,8 +1790,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
-int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
-                         uint32_t handle);
 /**
  * Returns true if seq1 is later than seq2.
  */
index 35d17fb..26c5f80 100644 (file)
@@ -26,6 +26,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
@@ -244,13 +245,6 @@ i915_gem_dumb_create(struct drm_file *file,
                               args->size, &args->handle);
 }
 
-int i915_gem_dumb_destroy(struct drm_file *file,
-                         struct drm_device *dev,
-                         uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 /**
  * Creates a new mm object and returns a handle to it.
  */
@@ -1426,11 +1420,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        if (!obj->fault_mappable)
                return;
 
-       if (obj->base.dev->dev_mapping)
-               unmap_mapping_range(obj->base.dev->dev_mapping,
-                                   (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
-                                   obj->base.size, 1);
-
+       drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
        obj->fault_mappable = false;
 }
 
@@ -1486,7 +1476,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;
 
-       if (obj->base.map_list.map)
+       if (drm_vma_node_has_offset(&obj->base.vma_node))
                return 0;
 
        dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -1517,9 +1507,6 @@ out:
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 {
-       if (!obj->base.map_list.map)
-               return;
-
        drm_gem_free_mmap_offset(&obj->base);
 }
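The three hunks above replace every direct map_list access in i915 with
vma-node helpers. In particular, drm_vma_node_unmap() stands in for the
removed open-coded unmap_mapping_range() call; judging from those removed
lines, the helper plausibly amounts to the following (drm_vma_node_size() is
assumed here and does not appear in this excerpt):

	static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
					      struct address_space *mapping)
	{
		if (mapping && drm_vma_node_has_offset(node))
			unmap_mapping_range(mapping,
					    drm_vma_node_offset_addr(node),
					    drm_vma_node_size(node)
							<< PAGE_SHIFT, 1);
	}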
 
@@ -1558,7 +1545,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
        if (ret)
                goto out;
 
-       *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+       *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
 out:
        drm_gem_object_unreference(&obj->base);
@@ -3129,7 +3116,8 @@ search_free:
        ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
                                                  &vma->node,
                                                  size, alignment,
-                                                 obj->cache_level, 0, gtt_max);
+                                                 obj->cache_level, 0, gtt_max,
+                                                 DRM_MM_SEARCH_DEFAULT);
        if (ret) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
index dc53a52..f2e185c 100644 (file)
@@ -289,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                goto fail_detach;
        }
 
-       ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
-       if (ret) {
-               i915_gem_object_free(obj);
-               goto fail_detach;
-       }
-
+       drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;
 
index 4bbde2a..38afadf 100644 (file)
@@ -94,31 +94,36 @@ static int i915_setup_compression(struct drm_device *dev, int size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+       int ret;
 
-       /* Try to over-allocate to reduce reallocations and fragmentation */
-       compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
-                                          size <<= 1, 4096, 0);
-       if (!compressed_fb)
-               compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
-                                                  size >>= 1, 4096, 0);
-       if (compressed_fb)
-               compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+       compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
        if (!compressed_fb)
-               goto err;
+               goto err_llb;
+
+       /* Try to over-allocate to reduce reallocations and fragmentation */
+       ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+                                size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
+       if (ret)
+               ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+                                        size >>= 1, 4096,
+                                        DRM_MM_SEARCH_DEFAULT);
+       if (ret)
+               goto err_llb;
 
        if (HAS_PCH_SPLIT(dev))
                I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
        else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
-               compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
-                                                   4096, 4096, 0);
-               if (compressed_llb)
-                       compressed_llb = drm_mm_get_block(compressed_llb,
-                                                         4096, 4096);
+               compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
                        goto err_fb;
 
+               ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
+                                        4096, 4096, DRM_MM_SEARCH_DEFAULT);
+               if (ret)
+                       goto err_fb;
+
                dev_priv->fbc.compressed_llb = compressed_llb;
 
                I915_WRITE(FBC_CFB_BASE,
@@ -136,8 +141,10 @@ static int i915_setup_compression(struct drm_device *dev, int size)
        return 0;
 
 err_fb:
-       drm_mm_put_block(compressed_fb);
-err:
+       kfree(compressed_llb);
+       drm_mm_remove_node(compressed_fb);
+err_llb:
+       kfree(compressed_fb);
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
 }
@@ -165,11 +172,15 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
        if (dev_priv->fbc.size == 0)
                return;
 
-       if (dev_priv->fbc.compressed_fb)
-               drm_mm_put_block(dev_priv->fbc.compressed_fb);
+       if (dev_priv->fbc.compressed_fb) {
+               drm_mm_remove_node(dev_priv->fbc.compressed_fb);
+               kfree(dev_priv->fbc.compressed_fb);
+       }
 
-       if (dev_priv->fbc.compressed_llb)
-               drm_mm_put_block(dev_priv->fbc.compressed_llb);
+       if (dev_priv->fbc.compressed_llb) {
+               drm_mm_remove_node(dev_priv->fbc.compressed_llb);
+               kfree(dev_priv->fbc.compressed_llb);
+       }
 
        dev_priv->fbc.size = 0;
 }
@@ -273,9 +284,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
        if (obj == NULL)
                return NULL;
 
-       if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
-               goto cleanup;
-
+       drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
        obj->pages = i915_pages_create_for_stolen(dev,
@@ -304,6 +313,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
+       int ret;
 
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;
@@ -312,17 +322,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
        if (size == 0)
                return NULL;
 
-       stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
-       if (stolen)
-               stolen = drm_mm_get_block(stolen, size, 4096);
-       if (stolen == NULL)
+       stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+       if (!stolen)
+               return NULL;
+
+       ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
+                                4096, DRM_MM_SEARCH_DEFAULT);
+       if (ret) {
+               kfree(stolen);
                return NULL;
+       }
 
        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;
 
-       drm_mm_put_block(stolen);
+       drm_mm_remove_node(stolen);
+       kfree(stolen);
        return NULL;
 }
 
@@ -368,7 +384,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
-               drm_mm_put_block(stolen);
+               drm_mm_remove_node(stolen);
+               kfree(stolen);
                return NULL;
        }
 
@@ -406,7 +423,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        return obj;
 
 err_out:
-       drm_mm_put_block(stolen);
+       drm_mm_remove_node(stolen);
+       kfree(stolen);
        drm_gem_object_unreference(&obj->base);
        return NULL;
 }
@@ -415,7 +433,8 @@ void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
        if (obj->stolen) {
-               drm_mm_put_block(obj->stolen);
+               drm_mm_remove_node(obj->stolen);
+               kfree(obj->stolen);
                obj->stolen = NULL;
        }
 }
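The stolen-memory hunks above illustrate the general recipe for retiring
drm_mm_search_free()/drm_mm_get_block(): the caller now allocates the node
itself, and a single drm_mm_insert_node() call both finds and claims the
space. Stripped of the FBC specifics, the shape is (sketch):

	static int my_claim_range(struct drm_mm *mm, unsigned long size,
				  unsigned alignment)
	{
		struct drm_mm_node *node;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		ret = drm_mm_insert_node(mm, node, size, alignment,
					 DRM_MM_SEARCH_DEFAULT);
		if (ret) {
			kfree(node);	/* insert failed; node never armed */
			return ret;
		}

		/* ... use node->start; on teardown:
		 * drm_mm_remove_node(node); kfree(node); */
		return 0;
	}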
index 54558a0..ca4bc54 100644 (file)
@@ -149,7 +149,7 @@ typedef struct drm_mga_private {
        unsigned int agp_size;
 } drm_mga_private_t;
 
-extern struct drm_ioctl_desc mga_ioctls[];
+extern const struct drm_ioctl_desc mga_ioctls[];
 extern int mga_max_ioctl;
 
                                /* mga_dma.c */
index 9c14514..37cc2fb 100644 (file)
@@ -1083,7 +1083,7 @@ file_priv)
        return 0;
 }
 
-struct drm_ioctl_desc mga_ioctls[] = {
+const struct drm_ioctl_desc mga_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
index 122b571..bd91964 100644 (file)
@@ -104,7 +104,7 @@ static struct drm_driver driver = {
        .gem_free_object = mgag200_gem_free_object,
        .dumb_create = mgag200_dumb_create,
        .dumb_map_offset = mgag200_dumb_mmap_offset,
-       .dumb_destroy = mgag200_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 };
 
 static struct pci_driver mgag200_pci_driver = {
index 12e2499..baaae19 100644 (file)
@@ -264,9 +264,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
 int mgag200_dumb_create(struct drm_file *file,
                        struct drm_device *dev,
                        struct drm_mode_create_dumb *args);
-int mgag200_dumb_destroy(struct drm_file *file,
-                        struct drm_device *dev,
-                        uint32_t handle);
 void mgag200_gem_free_object(struct drm_gem_object *obj);
 int
 mgag200_dumb_mmap_offset(struct drm_file *file,
index 9fa5685..0f8b861 100644 (file)
@@ -310,13 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
        return 0;
 }
 
-int mgag200_dumb_destroy(struct drm_file *file,
-                    struct drm_device *dev,
-                    uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 int mgag200_gem_init_object(struct drm_gem_object *obj)
 {
        BUG();
@@ -349,7 +342,7 @@ void mgag200_gem_free_object(struct drm_gem_object *obj)
 
 static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
 {
-       return bo->bo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 
 int
index 31cc8fe..054d9cf 100644 (file)
@@ -150,7 +150,7 @@ dp_link_train_update(struct dp_state *dp, u32 delay)
        if (ret)
                return ret;
 
-       DBG("status %*ph\n", 6, dp->stat);
+       DBG("status %6ph\n", dp->stat);
        return 0;
 }
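%*ph takes the byte count as a separate argument while %6ph embeds it in the
specifier; both print the bytes as space-separated hex, so folding the
constant in simply drops an argument. Illustration (generic printk usage,
not driver code):

	u8 stat[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

	/* Both lines print "status 11 22 33 44 55 66". */
	printk(KERN_DEBUG "status %*ph\n", 6, stat);
	printk(KERN_DEBUG "status %6ph\n", stat);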
 
index 907d20e..78637af 100644 (file)
@@ -674,13 +674,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 }
 
 int
-nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
-                            uint32_t handle)
-{
-       return drm_gem_handle_delete(file_priv, handle);
-}
-
-int
 nouveau_display_dumb_map_offset(struct drm_file *file_priv,
                                struct drm_device *dev,
                                uint32_t handle, uint64_t *poffset)
@@ -690,7 +683,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
        gem = drm_gem_object_lookup(dev, file_priv, handle);
        if (gem) {
                struct nouveau_bo *bo = gem->driver_private;
-               *poffset = bo->bo.addr_space_offset;
+               *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
                drm_gem_object_unreference_unlocked(gem);
                return 0;
        }
index 1ea3e47..185e741 100644 (file)
@@ -68,8 +68,6 @@ int  nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
                                 struct drm_mode_create_dumb *args);
 int  nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
                                     u32 handle, u64 *offset);
-int  nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
-                                 u32 handle);
 
 void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
 
index 6197266..1faa75f 100644 (file)
@@ -649,7 +649,7 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
        nouveau_cli_destroy(cli);
 }
 
-static struct drm_ioctl_desc
+static const struct drm_ioctl_desc
 nouveau_ioctls[] = {
        DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -684,7 +684,7 @@ nouveau_driver_fops = {
 static struct drm_driver
 driver = {
        .driver_features =
-               DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+               DRIVER_USE_AGP |
                DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
 
        .load = nouveau_drm_load,
@@ -704,6 +704,7 @@ driver = {
        .disable_vblank = nouveau_drm_vblank_disable,
 
        .ioctls = nouveau_ioctls,
+       .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
        .fops = &nouveau_driver_fops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -724,7 +725,7 @@ driver = {
 
        .dumb_create = nouveau_display_dumb_create,
        .dumb_map_offset = nouveau_display_dumb_map_offset,
-       .dumb_destroy = nouveau_display_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
@@ -774,8 +775,6 @@ nouveau_drm_pci_driver = {
 static int __init
 nouveau_drm_init(void)
 {
-       driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
-
        if (nouveau_modeset == -1) {
 #ifdef CONFIG_VGA_CONSOLE
                if (vgacon_text_force())
index 830cb7b..487242f 100644 (file)
@@ -220,7 +220,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
        }
 
        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-       rep->map_handle = nvbo->bo.addr_space_offset;
+       rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
index a3004f1..2f9e22e 100644 (file)
@@ -419,7 +419,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
        return ret;
 }
 
-static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
        DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -633,7 +633,7 @@ static struct drm_driver omap_drm_driver = {
                .gem_vm_ops = &omap_gem_vm_ops,
                .dumb_create = omap_gem_dumb_create,
                .dumb_map_offset = omap_gem_dumb_map_offset,
-               .dumb_destroy = omap_gem_dumb_destroy,
+               .dumb_destroy = drm_gem_dumb_destroy,
                .ioctls = ioctls,
                .num_ioctls = DRM_OMAP_NUM_IOCTLS,
                .fops = &omapdriver_fops,
index 14f17da..f2ba425 100644 (file)
@@ -225,8 +225,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
 void *omap_gem_vaddr(struct drm_gem_object *obj);
 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset);
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-               uint32_t handle);
 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args);
 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
index ebbdf41..b1f1970 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
+#include <drm/drm_vma_manager.h>
 
 #include "omap_drv.h"
 #include "omap_dmm_tiler.h"
@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
+       int ret;
+       size_t size;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       if (!obj->map_list.map) {
-               /* Make it mmapable */
-               size_t size = omap_gem_mmap_size(obj);
-               int ret = _drm_gem_create_mmap_offset_size(obj, size);
-
-               if (ret) {
-                       dev_err(dev->dev, "could not allocate mmap offset\n");
-                       return 0;
-               }
+       /* Make it mmapable */
+       size = omap_gem_mmap_size(obj);
+       ret = _drm_gem_create_mmap_offset_size(obj, size);
+       if (ret) {
+               dev_err(dev->dev, "could not allocate mmap offset\n");
+               return 0;
        }
 
-       return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+       return drm_vma_node_offset_addr(&obj->vma_node);
 }
 
 uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 
 /**
- * omap_gem_dumb_destroy       -       destroy a dumb buffer
- * @file: client file
- * @dev: our DRM device
- * @handle: the object handle
- *
- * Destroy a handle that was created via omap_gem_dumb_create.
- */
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-               uint32_t handle)
-{
-       /* No special work needed, drop the reference and see what falls out */
-       return drm_gem_handle_delete(file, handle);
-}
-
-/**
  * omap_gem_dumb_map   -       buffer mapping for dumb interface
  * @file: our drm client file
  * @dev: drm device
@@ -997,12 +982,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
        struct drm_device *dev = obj->dev;
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
-       uint64_t off = 0;
+       uint64_t off;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       if (obj->map_list.map)
-               off = (uint64_t)obj->map_list.hash.key;
+       off = drm_vma_node_start(&obj->vma_node);
 
        seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
                        omap_obj->flags, obj->name, obj->refcount.refcount.counter,
@@ -1309,8 +1293,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
 
        list_del(&omap_obj->mm_list);
 
-       if (obj->map_list.map)
-               drm_gem_free_mmap_offset(obj);
+       drm_gem_free_mmap_offset(obj);
 
        /* this means the object is still pinned.. which really should
         * not happen.  I think..
@@ -1427,8 +1410,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                omap_obj->height = gsize.tiled.height;
        }
 
+       ret = 0;
        if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
-               ret = drm_gem_private_object_init(dev, obj, size);
+               drm_gem_private_object_init(dev, obj, size);
        else
                ret = drm_gem_object_init(dev, obj, size);
 
index f9eb679..dbb1575 100644 (file)
@@ -118,52 +118,7 @@ _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list;
-       struct drm_local_map *map;
-       int ret = 0;
-
-       /* Set the object up for mmap'ing */
-       list = &obj->map_list;
-       list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-       if (!list->map)
-               return -ENOMEM;
-
-       map = list->map;
-       map->type = _DRM_GEM;
-       map->size = size;
-       map->handle = obj;
-
-       /* Get a DRM GEM mmap offset allocated... */
-       list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-                       size / PAGE_SIZE, 0, 0);
-
-       if (!list->file_offset_node) {
-               DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-               ret = -ENOSPC;
-               goto out_free_list;
-       }
-
-       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-                       size / PAGE_SIZE, 0);
-       if (!list->file_offset_node) {
-               ret = -ENOMEM;
-               goto out_free_list;
-       }
-
-       list->hash.key = list->file_offset_node->start;
-       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-       if (ret) {
-               DRM_ERROR("failed to add to map hash\n");
-               goto out_free_mm;
-       }
-
-       return 0;
-
-out_free_mm:
-       drm_mm_put_block(list->file_offset_node);
-out_free_list:
-       kfree(list->map);
-       list->map = NULL;
 
-       return ret;
+       return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+                                 size / PAGE_SIZE);
 }
index df0b577..48f2dfd 100644 (file)
@@ -221,7 +221,7 @@ static struct drm_driver qxl_driver = {
 
        .dumb_create = qxl_mode_dumb_create,
        .dumb_map_offset = qxl_mode_dumb_mmap,
-       .dumb_destroy = qxl_mode_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = qxl_debugfs_init,
        .debugfs_cleanup = qxl_debugfs_takedown,
index aacb791..afd09d4 100644 (file)
@@ -319,7 +319,7 @@ struct qxl_device {
 /* forward declaration for QXL_INFO_IO */
 void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
 
-extern struct drm_ioctl_desc qxl_ioctls[];
+extern const struct drm_ioctl_desc qxl_ioctls[];
 extern int qxl_max_ioctl;
 
 int qxl_driver_load(struct drm_device *dev, unsigned long flags);
@@ -418,9 +418,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
 int qxl_mode_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
-                         struct drm_device *dev,
-                         uint32_t handle);
 int qxl_mode_dumb_mmap(struct drm_file *filp,
                       struct drm_device *dev,
                       uint32_t handle, uint64_t *offset_p);
index 847c4ee..d34bb41 100644 (file)
@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
        return 0;
 }
 
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
-                            struct drm_device *dev,
-                            uint32_t handle)
-{
-       return drm_gem_handle_delete(file_priv, handle);
-}
-
 int qxl_mode_dumb_mmap(struct drm_file *file_priv,
                       struct drm_device *dev,
                       uint32_t handle, uint64_t *offset_p)
index 27f45e4..6cd7273 100644 (file)
@@ -402,7 +402,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
        return ret;
 }
 
-struct drm_ioctl_desc qxl_ioctls[] = {
+const struct drm_ioctl_desc qxl_ioctls[] = {
        DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
        DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
index ee7ad79..af10165 100644 (file)
@@ -59,7 +59,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
-       return bo->tbo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }
 
 static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
index b443d67..1a648e1 100644 (file)
@@ -87,7 +87,7 @@ qxl_release_free(struct qxl_device *qdev,
 
        for (i = 0 ; i < release->bo_count; ++i) {
                QXL_INFO(qdev, "release %llx\n",
-                       release->bos[i]->tbo.addr_space_offset
+                       drm_vma_node_offset_addr(&release->bos[i]->tbo.vma_node)
                                                - DRM_FILE_OFFSET);
                qxl_fence_remove_release(&release->bos[i]->fence, release->id);
                qxl_bo_unref(&release->bos[i]);
index d4660cf..c451257 100644 (file)
@@ -540,7 +540,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
        dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
                              + init->ring_size / sizeof(u32));
        dev_priv->ring.size = init->ring_size;
-       dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+       dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
 
        dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
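drm_order() was DRM's private ceiling-log2 helper; order_base_2() from
<linux/log2.h> computes the same value for the sizes used at these call
sites, which is why the radeon/r128 conversions in the hunks below are
one-for-one substitutions. For instance:

	#include <linux/log2.h>

	static unsigned int my_ring_order(unsigned int ring_size)
	{
		/* order_base_2(n) == ceil(log2(n)):
		 * 4096/8 = 512 -> 9, and 3 -> 2 (rounds up). */
		return order_base_2(ring_size / 8);
	}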
 
index 930c71b..56eb5e3 100644 (file)
@@ -131,7 +131,7 @@ typedef struct drm_r128_buf_priv {
        drm_r128_freelist_t *list_entry;
 } drm_r128_buf_priv_t;
 
-extern struct drm_ioctl_desc r128_ioctls[];
+extern const struct drm_ioctl_desc r128_ioctls[];
 extern int r128_max_ioctl;
 
                                /* r128_cce.c */
index 19bb7e6..01dd9ae 100644 (file)
@@ -1643,7 +1643,7 @@ void r128_driver_lastclose(struct drm_device *dev)
        r128_do_cleanup_cce(dev);
 }
 
-struct drm_ioctl_desc r128_ioctls[] = {
+const struct drm_ioctl_desc r128_ioctls[] = {
        DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
index 064023b..c239739 100644 (file)
@@ -550,7 +550,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
                return false;
        }
 
-       DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
+       DRM_DEBUG_KMS("link status %6ph\n", link_status);
        return true;
 }
 
index 6dacec4..6adbc99 100644 (file)
@@ -2535,8 +2535,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
        /* ring 0 - compute and gfx */
        /* Set ring buffer size */
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -2915,7 +2915,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
                /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
                tmp = RREG32(CP_HPD_EOP_CONTROL);
                tmp &= ~EOP_SIZE_MASK;
-               tmp |= drm_order(MEC_HPD_SIZE / 8);
+               tmp |= order_base_2(MEC_HPD_SIZE / 8);
                WREG32(CP_HPD_EOP_CONTROL, tmp);
        }
        cik_srbm_select(rdev, 0, 0, 0, 0);
@@ -3030,9 +3030,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
                        ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
 
                mqd->queue_state.cp_hqd_pq_control |=
-                       drm_order(rdev->ring[idx].ring_size / 8);
+                       order_base_2(rdev->ring[idx].ring_size / 8);
                mqd->queue_state.cp_hqd_pq_control |=
-                       (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8);
+                       (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
 #ifdef __BIG_ENDIAN
                mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
 #endif
@@ -3375,7 +3375,7 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
                WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
 
                /* Set ring buffer size in dwords */
-               rb_bufsz = drm_order(ring->ring_size / 4);
+               rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
                rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -5030,7 +5030,7 @@ static int cik_irq_init(struct radeon_device *rdev)
        WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
        WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-       rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+       rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
        ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
                      IH_WPTR_OVERFLOW_CLEAR |
index 038dcac..b67c9ec 100644 (file)
@@ -2881,8 +2881,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
        RREG32(GRBM_SOFT_RESET);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
index 56bd4f3..5b6e477 100644 (file)
@@ -1560,8 +1560,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
 
                /* Set ring buffer size */
                ring = &rdev->ring[ridx[i]];
-               rb_cntl = drm_order(ring->ring_size / 8);
-               rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
+               rb_cntl = order_base_2(ring->ring_size / 8);
+               rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
 #ifdef __BIG_ENDIAN
                rb_cntl |= BUF_SWAP_32BIT;
 #endif
@@ -1720,7 +1720,7 @@ int cayman_dma_resume(struct radeon_device *rdev)
                WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
 
                /* Set ring buffer size in dwords */
-               rb_bufsz = drm_order(ring->ring_size / 4);
+               rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
                rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
index 75349cd..5625cf7 100644 (file)
@@ -1097,7 +1097,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        }
 
        /* Align ring size */
-       rb_bufsz = drm_order(ring_size / 8);
+       rb_bufsz = order_base_2(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        r100_cp_load_microcode(rdev);
        r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
index 393880a..319e1ee 100644 (file)
@@ -2413,8 +2413,8 @@ int r600_cp_resume(struct radeon_device *rdev)
        WREG32(GRBM_SOFT_RESET, 0);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -2467,7 +2467,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
        int r;
 
        /* Align ring size */
-       rb_bufsz = drm_order(ring_size / 8);
+       rb_bufsz = order_base_2(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        ring->ring_size = ring_size;
        ring->align_mask = 16 - 1;
@@ -2547,7 +2547,7 @@ int r600_dma_resume(struct radeon_device *rdev)
        WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 
        /* Set ring buffer size in dwords */
-       rb_bufsz = drm_order(ring->ring_size / 4);
+       rb_bufsz = order_base_2(ring->ring_size / 4);
        rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
        rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -2656,7 +2656,7 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
        WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(ring->ring_size);
+       rb_bufsz = order_base_2(ring->ring_size);
        rb_bufsz = (0x1 << 8) | rb_bufsz;
        WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
 
@@ -3812,7 +3812,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
        u32 rb_bufsz;
 
        /* Align ring size */
-       rb_bufsz = drm_order(ring_size / 4);
+       rb_bufsz = order_base_2(ring_size / 4);
        ring_size = (1 << rb_bufsz) * 4;
        rdev->ih.ring_size = ring_size;
        rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
@@ -4049,7 +4049,7 @@ int r600_irq_init(struct radeon_device *rdev)
        WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
        WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-       rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+       rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
        ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
                      IH_WPTR_OVERFLOW_CLEAR |
index 1c51c08..d8eb48b 100644 (file)
@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
        dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
                              + init->ring_size / sizeof(u32));
        dev_priv->ring.size = init->ring_size;
-       dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+       dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
 
        dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
-       dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+       dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
 
        dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
-       dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+       dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
 
        dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
index 2f08219..19066d1 100644 (file)
@@ -492,9 +492,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
 int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
-                            struct drm_device *dev,
-                            uint32_t handle);
 
 /*
  * Semaphores.
index efc4f64..3cae2bb 100644 (file)
@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
        dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
                              + init->ring_size / sizeof(u32));
        dev_priv->ring.size = init->ring_size;
-       dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+       dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
 
        dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
-       dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+       dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
 
        dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
-       dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+       dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
        dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
        dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
index 29876b1..fa7a7e1 100644 (file)
@@ -101,8 +101,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv);
 int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
 int radeon_gem_object_open(struct drm_gem_object *obj,
@@ -111,7 +109,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
                                struct drm_file *file_priv);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
                                      int *vpos, int *hpos);
-extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern const struct drm_ioctl_desc radeon_ioctls_kms[];
 extern int radeon_max_kms_ioctl;
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
 int radeon_mode_dumb_mmap(struct drm_file *filp,
@@ -120,9 +118,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
 int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
-                            struct drm_device *dev,
-                            uint32_t handle);
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
                                                        size_t size,
@@ -390,8 +385,8 @@ static const struct file_operations radeon_driver_kms_fops = {
 
 static struct drm_driver kms_driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-           DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+           DRIVER_USE_AGP | DRIVER_USE_MTRR | 
+           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
            DRIVER_PRIME,
        .dev_priv_size = 0,
        .load = radeon_driver_load_kms,
@@ -421,10 +416,9 @@ static struct drm_driver kms_driver = {
        .gem_free_object = radeon_gem_object_free,
        .gem_open_object = radeon_gem_object_open,
        .gem_close_object = radeon_gem_object_close,
-       .dma_ioctl = radeon_dma_ioctl_kms,
        .dumb_create = radeon_mode_dumb_create,
        .dumb_map_offset = radeon_mode_dumb_mmap,
-       .dumb_destroy = radeon_mode_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
        .fops = &radeon_driver_kms_fops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
index aa79603..dce99c8 100644 (file)
@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
        return 0;
 }
 
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
-                            struct drm_device *dev,
-                            uint32_t handle)
-{
-       return drm_gem_handle_delete(file_priv, handle);
-}
-
 #if defined(CONFIG_DEBUG_FS)
 static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
 {
index 49ff3d1..866c2b7 100644 (file)
@@ -683,16 +683,6 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
                                                     drmcrtc);
 }
 
-/*
- * IOCTL.
- */
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       /* Not valid in KMS. */
-       return -EINVAL;
-}
-
 #define KMS_INVALID_IOCTL(name)                                                \
 int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
 {                                                                      \
@@ -732,7 +722,7 @@ KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
 KMS_INVALID_IOCTL(radeon_surface_free_kms)
 
 
-struct drm_ioctl_desc radeon_ioctls_kms[] = {
+const struct drm_ioctl_desc radeon_ioctls_kms[] = {
        DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
index 49c82c4..209b111 100644 (file)
@@ -113,13 +113,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
  * @bo:        radeon object for which we query the offset
  *
  * Returns mmap offset of the object.
- *
- * Note: addr_space_offset is constant after ttm bo init thus isn't protected
- * by any lock.
  */
 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
 {
-       return bo->tbo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }
 
 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
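addr_space_offset was a byte offset cached on the TTM bo and, as the deleted
comment notes, relied on being constant after init. With the bo embedding a
drm_vma_offset_node instead, every wrapper converted in this series
(mgag200, nouveau, qxl, radeon) reads it through the same accessor:

	static inline u64 my_bo_mmap_offset(struct ttm_buffer_object *bo)
	{
		/* Derived from the node's page start; 0 while unallocated. */
		return drm_vma_node_offset_addr(&bo->vma_node);
	}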
index d325280..d71037f 100644 (file)
@@ -3383,8 +3383,8 @@ static int si_cp_resume(struct radeon_device *rdev)
        /* ring 0 - compute and gfx */
        /* Set ring buffer size */
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -3416,8 +3416,8 @@ static int si_cp_resume(struct radeon_device *rdev)
        /* ring1  - compute only */
        /* Set ring buffer size */
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -3442,8 +3442,8 @@ static int si_cp_resume(struct radeon_device *rdev)
        /* ring2 - compute only */
        /* Set ring buffer size */
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -5651,7 +5651,7 @@ static int si_irq_init(struct radeon_device *rdev)
        WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
        WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-       rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+       rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
        ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
                      IH_WPTR_OVERFLOW_CLEAR |
index dc0fe09..5069d9c 100644 (file)
@@ -258,7 +258,7 @@ static struct drm_driver rcar_du_driver = {
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,
        .dumb_create            = rcar_du_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
-       .dumb_destroy           = drm_gem_cma_dumb_destroy,
+       .dumb_destroy           = drm_gem_dumb_destroy,
        .fops                   = &rcar_du_fops,
        .name                   = "rcar-du",
        .desc                   = "Renesas R-Car Display Unit",
index bd6b2cf..b17d071 100644 (file)
@@ -1072,7 +1072,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
                drm_idlelock_release(&file_priv->master->lock);
 }
 
-struct drm_ioctl_desc savage_ioctls[] = {
+const struct drm_ioctl_desc savage_ioctls[] = {
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
index c05082a..335f8fc 100644 (file)
@@ -104,7 +104,7 @@ enum savage_family {
        S3_LAST
 };
 
-extern struct drm_ioctl_desc savage_ioctls[];
+extern const struct drm_ioctl_desc savage_ioctls[];
 extern int savage_max_ioctl;
 
 #define S3_SAVAGE3D_SERIES(chip)  ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
index 5f83f9a..7f2ea1a 100644 (file)
@@ -285,7 +285,7 @@ static struct drm_driver shmob_drm_driver = {
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
-       .dumb_destroy           = drm_gem_cma_dumb_destroy,
+       .dumb_destroy           = drm_gem_dumb_destroy,
        .fops                   = &shmob_drm_fops,
        .name                   = "shmob-drm",
        .desc                   = "Renesas SH Mobile DRM",
index 13b527b..c31c025 100644 (file)
@@ -70,7 +70,7 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev,
                                       struct drm_file *file_priv);
 extern void sis_lastclose(struct drm_device *dev);
 
-extern struct drm_ioctl_desc sis_ioctls[];
+extern const struct drm_ioctl_desc sis_ioctls[];
 extern int sis_max_ioctl;
 
 #endif
index 9a43d98..01857d8 100644 (file)
@@ -109,7 +109,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
        if (pool == AGP_TYPE) {
                retval = drm_mm_insert_node(&dev_priv->agp_mm,
                                            &item->mm_node,
-                                           mem->size, 0);
+                                           mem->size, 0,
+                                           DRM_MM_SEARCH_DEFAULT);
                offset = item->mm_node.start;
        } else {
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -121,7 +122,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 #else
                retval = drm_mm_insert_node(&dev_priv->vram_mm,
                                            &item->mm_node,
-                                           mem->size, 0);
+                                           mem->size, 0,
+                                           DRM_MM_SEARCH_DEFAULT);
                offset = item->mm_node.start;
 #endif
        }
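
The two hunks above show the reworked drm_mm interface: instead of the removed search-then-get pair, callers embed a struct drm_mm_node and pass a search flag directly to drm_mm_insert_node() (the new declarations appear in the drm_mm.h hunks near the end of this diff). A minimal sketch under assumed values; the manager, node, and size are hypothetical:

    #include <drm/drm_mm.h>

    /* Carve 16 pages out of a hypothetical range manager. */
    static int example_alloc(struct drm_mm *mm, struct drm_mm_node *node)
    {
            int ret;

            /* First-fit search; DRM_MM_SEARCH_BEST asks for best-fit. */
            ret = drm_mm_insert_node(mm, node, 16, 0, DRM_MM_SEARCH_DEFAULT);
            if (ret)
                    return ret;  /* no hole large enough */

            /* ... node->start now holds the allocated offset ... */

            drm_mm_remove_node(node);
            return 0;
    }
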
@@ -348,7 +350,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
        return;
 }
 
-struct drm_ioctl_desc sis_ioctls[] = {
+const struct drm_ioctl_desc sis_ioctls[] = {
        DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
index 40b71da..14801c2 100644
@@ -519,7 +519,7 @@ static struct drm_driver tilcdc_driver = {
        .gem_vm_ops         = &drm_gem_cma_vm_ops,
        .dumb_create        = drm_gem_cma_dumb_create,
        .dumb_map_offset    = drm_gem_cma_dumb_map_offset,
-       .dumb_destroy       = drm_gem_cma_dumb_destroy,
+       .dumb_destroy       = drm_gem_dumb_destroy,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = tilcdc_debugfs_init,
        .debugfs_cleanup    = tilcdc_debugfs_cleanup,
index cb9dd67..f1a857e 100644
@@ -45,7 +45,6 @@
 #define TTM_DEBUG(fmt, arg...)
 #define TTM_BO_HASH_ORDER 13
 
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
@@ -615,13 +614,7 @@ static void ttm_bo_release(struct kref *kref)
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-       write_lock(&bdev->vm_lock);
-       if (likely(bo->vm_node != NULL)) {
-               rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
-               drm_mm_put_block(bo->vm_node);
-               bo->vm_node = NULL;
-       }
-       write_unlock(&bdev->vm_lock);
+       drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
        ttm_mem_io_lock(man, false);
        ttm_mem_io_free_vm(bo);
        ttm_mem_io_unlock(man);
@@ -1129,6 +1122,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->resv = &bo->ttm_resv;
        reservation_object_init(bo->resv);
        atomic_inc(&bo->glob->bo_count);
+       drm_vma_node_reset(&bo->vma_node);
 
        ret = ttm_bo_check_placement(bo, placement);
 
@@ -1139,7 +1133,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        if (likely(!ret) &&
            (bo->type == ttm_bo_type_device ||
             bo->type == ttm_bo_type_sg))
-               ret = ttm_bo_setup_vm(bo);
+               ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+                                        bo->mem.num_pages);
 
        locked = ww_mutex_trylock(&bo->resv->lock);
        WARN_ON(!locked);
@@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                TTM_DEBUG("Swap list was clean\n");
        spin_unlock(&glob->lru_lock);
 
-       BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
-       write_lock(&bdev->vm_lock);
-       drm_mm_takedown(&bdev->addr_space_mm);
-       write_unlock(&bdev->vm_lock);
+       drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
        return ret;
 }
@@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 {
        int ret = -EINVAL;
 
-       rwlock_init(&bdev->vm_lock);
        bdev->driver = driver;
 
        memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
        if (unlikely(ret != 0))
                goto out_no_sys;
 
-       bdev->addr_space_rb = RB_ROOT;
-       drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+       drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+                                   0x10000000);
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
@@ -1498,12 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       loff_t offset = (loff_t) bo->addr_space_offset;
-       loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
 
-       if (!bdev->dev_mapping)
-               return;
-       unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+       drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
        ttm_mem_io_free_vm(bo);
 }
 
@@ -1520,78 +1506,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
-       struct ttm_bo_device *bdev = bo->bdev;
-       struct rb_node **cur = &bdev->addr_space_rb.rb_node;
-       struct rb_node *parent = NULL;
-       struct ttm_buffer_object *cur_bo;
-       unsigned long offset = bo->vm_node->start;
-       unsigned long cur_offset;
-
-       while (*cur) {
-               parent = *cur;
-               cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
-               cur_offset = cur_bo->vm_node->start;
-               if (offset < cur_offset)
-                       cur = &parent->rb_left;
-               else if (offset > cur_offset)
-                       cur = &parent->rb_right;
-               else
-                       BUG();
-       }
-
-       rb_link_node(&bo->vm_rb, parent, cur);
-       rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
-/**
- * ttm_bo_setup_vm:
- *
- * @bo: the buffer to allocate address space for
- *
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
- */
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-{
-       struct ttm_bo_device *bdev = bo->bdev;
-       int ret;
-
-retry_pre_get:
-       ret = drm_mm_pre_get(&bdev->addr_space_mm);
-       if (unlikely(ret != 0))
-               return ret;
-
-       write_lock(&bdev->vm_lock);
-       bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-                                        bo->mem.num_pages, 0, 0);
-
-       if (unlikely(bo->vm_node == NULL)) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
-
-       bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-                                             bo->mem.num_pages, 0);
-
-       if (unlikely(bo->vm_node == NULL)) {
-               write_unlock(&bdev->vm_lock);
-               goto retry_pre_get;
-       }
-
-       ttm_bo_vm_insert_rb(bo);
-       write_unlock(&bdev->vm_lock);
-       bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
-       return 0;
-out_unlock:
-       write_unlock(&bdev->vm_lock);
-       return ret;
-}
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
index e4367f9..c58eba3 100644
@@ -61,28 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
        lpfn = placement->lpfn;
        if (!lpfn)
                lpfn = man->size;
-       do {
-               ret = drm_mm_pre_get(mm);
-               if (unlikely(ret))
-                       return ret;
 
-               spin_lock(&rman->lock);
-               node = drm_mm_search_free_in_range(mm,
-                                       mem->num_pages, mem->page_alignment,
-                                       placement->fpfn, lpfn, 1);
-               if (unlikely(node == NULL)) {
-                       spin_unlock(&rman->lock);
-                       return 0;
-               }
-               node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-                                                    mem->page_alignment,
-                                                    placement->fpfn,
-                                                    lpfn);
-               spin_unlock(&rman->lock);
-       } while (node == NULL);
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       spin_lock(&rman->lock);
+       ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+                                         mem->page_alignment,
+                                         placement->fpfn, lpfn,
+                                         DRM_MM_SEARCH_BEST);
+       spin_unlock(&rman->lock);
+
+       if (unlikely(ret)) {
+               kfree(node);
+       } else {
+               mem->mm_node = node;
+               mem->start = node->start;
+       }
 
-       mem->mm_node = node;
-       mem->start = node->start;
        return 0;
 }
 
@@ -93,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 
        if (mem->mm_node) {
                spin_lock(&rman->lock);
-               drm_mm_put_block(mem->mm_node);
+               drm_mm_remove_node(mem->mm_node);
                spin_unlock(&rman->lock);
+
+               kfree(mem->mm_node);
                mem->mm_node = NULL;
        }
 }
index 319cf41..7cc904d 100644
@@ -30,6 +30,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
-       fbo->vm_node = NULL;
+       drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);
 
        spin_lock(&bdev->fence_lock);
index 3df9f16..8c0e2c0 100644
@@ -33,6 +33,7 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/mm.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
-                                                    unsigned long page_start,
-                                                    unsigned long num_pages)
-{
-       struct rb_node *cur = bdev->addr_space_rb.rb_node;
-       unsigned long cur_offset;
-       struct ttm_buffer_object *bo;
-       struct ttm_buffer_object *best_bo = NULL;
-
-       while (likely(cur != NULL)) {
-               bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
-               cur_offset = bo->vm_node->start;
-               if (page_start >= cur_offset) {
-                       cur = cur->rb_right;
-                       best_bo = bo;
-                       if (page_start == cur_offset)
-                               break;
-               } else
-                       cur = cur->rb_left;
-       }
-
-       if (unlikely(best_bo == NULL))
-               return NULL;
-
-       if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
-                    (page_start + num_pages)))
-               return NULL;
-
-       return best_bo;
-}
-
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-           bo->vm_node->start - vma->vm_pgoff;
+           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
        page_last = vma_pages(vma) +
-           bo->vm_node->start - vma->vm_pgoff;
+           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
 
        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
        .close = ttm_bo_vm_close
 };
 
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+                                                 unsigned long offset,
+                                                 unsigned long pages)
+{
+       struct drm_vma_offset_node *node;
+       struct ttm_buffer_object *bo = NULL;
+
+       drm_vma_offset_lock_lookup(&bdev->vma_manager);
+
+       node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+       if (likely(node)) {
+               bo = container_of(node, struct ttm_buffer_object, vma_node);
+               if (!kref_get_unless_zero(&bo->kref))
+                       bo = NULL;
+       }
+
+       drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+       if (!bo)
+               pr_err("Could not find buffer object to map\n");
+
+       return bo;
+}
+
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
 {
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
        struct ttm_buffer_object *bo;
        int ret;
 
-       read_lock(&bdev->vm_lock);
-       bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-                                vma_pages(vma));
-       if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
-               bo = NULL;
-       read_unlock(&bdev->vm_lock);
-
-       if (unlikely(bo == NULL)) {
-               pr_err("Could not find buffer object to map\n");
+       bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+       if (unlikely(!bo))
                return -EINVAL;
-       }
 
        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
@@ -324,12 +310,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
        bool no_wait = false;
        bool dummy;
 
-       read_lock(&bdev->vm_lock);
-       bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
-       if (likely(bo != NULL))
-               ttm_bo_reference(bo);
-       read_unlock(&bdev->vm_lock);
-
+       bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
        if (unlikely(bo == NULL))
                return -EFAULT;
 
@@ -343,7 +324,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
        if (unlikely(ret != 0))
                goto out_unref;
 
-       kmap_offset = dev_offset - bo->vm_node->start;
+       kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node);
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
index c0770db..bb0af58 100644
@@ -84,7 +84,7 @@ static struct drm_driver driver = {
 
        .dumb_create = udl_dumb_create,
        .dumb_map_offset = udl_gem_mmap,
-       .dumb_destroy = udl_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
        .fops = &udl_driver_fops,
 
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
index cc6d90f..56aec94 100644
@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
                    struct drm_mode_create_dumb *args);
 int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
                 uint32_t handle, uint64_t *offset);
-int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
-                    uint32_t handle);
 
 int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
index ef034fa..b5e3b80 100644
@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
                              args->size, &args->handle);
 }
 
-int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-                    uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        int ret;
@@ -223,8 +217,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
        if (obj->pages)
                udl_gem_put_pages(obj);
 
-       if (gem_obj->map_list.map)
-               drm_gem_free_mmap_offset(gem_obj);
+       drm_gem_free_mmap_offset(gem_obj);
 }
 
 /* the dumb interface doesn't work with the GEM straight MMAP
@@ -247,13 +240,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
        ret = udl_gem_get_pages(gobj, GFP_KERNEL);
        if (ret)
                goto out;
-       if (!gobj->base.map_list.map) {
-               ret = drm_gem_create_mmap_offset(obj);
-               if (ret)
-                       goto out;
-       }
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto out;
 
-       *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
+       *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
 
 out:
        drm_gem_object_unreference(&gobj->base);
index 0ce2d71..f5ae574 100644
@@ -41,8 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
        total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
                                    0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
        if (total_len > 5) {
-               DRM_INFO("vendor descriptor length:%x data:%*ph\n",
-                       total_len, 11, desc);
+               DRM_INFO("vendor descriptor length:%x data:%11ph\n",
+                       total_len, desc);
 
                if ((desc[0] != total_len) || /* descriptor length */
                    (desc[1] != 0x5f) ||   /* vendor descriptor type */
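
%ph is the kernel's printf extension for dumping a small buffer as space-separated hex bytes; the change above simply folds the constant length into the format string rather than passing it as a field-width argument. Both forms below print the same 11 bytes (the desc buffer here is illustrative):

    u8 desc[11];

    pr_info("data:%*ph\n", 11, desc);  /* length as an argument */
    pr_info("data:%11ph\n", desc);     /* length in the format */
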
index 13558f5..652f9b4 100644
@@ -720,7 +720,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
        return ret;
 }
 
-struct drm_ioctl_desc via_ioctls[] = {
+const struct drm_ioctl_desc via_ioctls[] = {
        DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
index 893a650..a811ef2 100644
@@ -114,7 +114,7 @@ enum via_family {
 #define VIA_READ8(reg)         DRM_READ8(VIA_BASE, reg)
 #define VIA_WRITE8(reg, val)   DRM_WRITE8(VIA_BASE, reg, val)
 
-extern struct drm_ioctl_desc via_ioctls[];
+extern const struct drm_ioctl_desc via_ioctls[];
 extern int via_max_ioctl;
 
 extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
index 0ab93ff..7e3ad87 100644
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
        if (mem->type == VIA_MEM_AGP)
                retval = drm_mm_insert_node(&dev_priv->agp_mm,
                                            &item->mm_node,
-                                           tmpSize, 0);
+                                           tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
        else
                retval = drm_mm_insert_node(&dev_priv->vram_mm,
                                            &item->mm_node,
-                                           tmpSize, 0);
+                                           tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
        if (retval)
                goto fail_alloc;
 
index 78e2164..5086150 100644
  * Ioctl definitions.
  */
 
-static struct drm_ioctl_desc vmw_ioctls[] = {
+static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
@@ -782,7 +782,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
 
        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
-               struct drm_ioctl_desc *ioctl =
+               const struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];
 
                if (unlikely(ioctl->cmd_drv != cmd)) {
index 7953d1f..0e67cf4 100644
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                goto out_no_dmabuf;
 
        rep->handle = handle;
-       rep->map_handle = dma_buf->base.addr_space_offset;
+       rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;
 
@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
        if (ret != 0)
                return -EINVAL;
 
-       *offset = out_buf->base.addr_space_offset;
+       *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
 }
index e184b00..b128b90 100644
@@ -487,7 +487,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
 }
 #endif
 
-static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
 #ifdef CONFIG_DRM_TEGRA_STAGING
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
@@ -633,7 +633,7 @@ struct drm_driver tegra_drm_driver = {
        .gem_vm_ops = &tegra_bo_vm_ops,
        .dumb_create = tegra_bo_dumb_create,
        .dumb_map_offset = tegra_bo_dumb_map_offset,
-       .dumb_destroy = tegra_bo_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 
        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
index c5e9a9b..3c35622 100644
@@ -108,7 +108,7 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
 
 unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
 {
-       return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
+       return (unsigned int)drm_vma_node_offset_addr(&bo->gem.vma_node);
 }
 
 struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
@@ -182,8 +182,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
 {
        struct tegra_bo *bo = to_tegra_bo(gem);
 
-       if (gem->map_list.map)
-               drm_gem_free_mmap_offset(gem);
+       drm_gem_free_mmap_offset(gem);
 
        drm_gem_object_release(gem);
        tegra_bo_destroy(gem->dev, bo);
@@ -262,9 +261,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
 
        return ret;
 }
-
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
-                         unsigned int handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
index 34de2b4..2e93b03 100644
@@ -49,8 +49,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args);
 int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
                             uint32_t handle, uint64_t *offset);
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
-                         unsigned int handle);
 
 int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
 
index 9854a1d..a890049 100644
@@ -783,7 +783,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
 }
 EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
 
-static struct drm_ioctl_desc imx_drm_ioctls[] = {
+static const struct drm_ioctl_desc imx_drm_ioctls[] = {
        /* none so far */
 };
 
@@ -797,7 +797,7 @@ static struct drm_driver imx_drm_driver = {
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
-       .dumb_destroy           = drm_gem_cma_dumb_destroy,
+       .dumb_destroy           = drm_gem_dumb_destroy,
 
        .get_vblank_counter     = drm_vblank_count,
        .enable_vblank          = imx_drm_enable_vblank,
index 12083dc..fba5473 100644
@@ -71,6 +71,7 @@
 #include <asm/pgalloc.h>
 #include <drm/drm.h>
 #include <drm/drm_sarea.h>
+#include <drm/drm_vma_manager.h>
 
 #include <linux/idr.h>
 
@@ -587,7 +588,6 @@ struct drm_map_list {
        struct drm_local_map *map;      /**< mapping */
        uint64_t user_token;
        struct drm_master *master;
-       struct drm_mm_node *file_offset_node;   /**< fake offset */
 };
 
 /**
@@ -622,8 +622,7 @@ struct drm_ati_pcigart_info {
  * GEM specific mm private for tracking GEM objects
  */
 struct drm_gem_mm {
-       struct drm_mm offset_manager;   /**< Offset mgmt for buffer objects */
-       struct drm_open_hash offset_hash; /**< User token hash table for maps */
+       struct drm_vma_offset_manager vma_manager;
 };
 
 /**
@@ -644,7 +643,7 @@ struct drm_gem_object {
        struct file *filp;
 
        /* Mapping info for this object */
-       struct drm_map_list map_list;
+       struct drm_vma_offset_node vma_node;
 
        /**
         * Size of the object, in bytes.  Immutable over the object's
@@ -737,6 +736,7 @@ struct drm_bus {
        int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
        /* hooks that are for PCI */
        int (*agp_init)(struct drm_device *dev);
+       void (*agp_destroy)(struct drm_device *dev);
 
 };
 
@@ -966,7 +966,7 @@ struct drm_driver {
 
        u32 driver_features;
        int dev_priv_size;
-       struct drm_ioctl_desc *ioctls;
+       const struct drm_ioctl_desc *ioctls;
        int num_ioctls;
        const struct file_operations *fops;
        union {
@@ -1131,12 +1131,7 @@ struct drm_device {
        /*@{ */
        int irq_enabled;                /**< True if irq handler is enabled */
        __volatile__ long context_flag; /**< Context swapping flag */
-       __volatile__ long interrupt_flag; /**< Interruption handler flag */
-       __volatile__ long dma_flag;     /**< DMA dispatch flag */
-       wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
-       int last_checked;               /**< Last context checked for DMA */
        int last_context;               /**< Last current context */
-       unsigned long last_switch;      /**< jiffies at last context switch */
        /*@} */
 
        struct work_struct work;
@@ -1174,12 +1169,8 @@ struct drm_device {
        spinlock_t event_lock;
 
        /*@} */
-       cycles_t ctx_start;
-       cycles_t lck_start;
 
        struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
-       wait_queue_head_t buf_readers;  /**< Processes waiting to read */
-       wait_queue_head_t buf_writers;  /**< Processes waiting to ctx switch */
 
        struct drm_agp_head *agp;       /**< AGP data */
 
@@ -1335,8 +1326,6 @@ extern int drm_resctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
 extern int drm_addctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
-extern int drm_modctx(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv);
 extern int drm_getctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
 extern int drm_switchctx(struct drm_device *dev, void *data,
@@ -1405,7 +1394,6 @@ extern int drm_freebufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 extern int drm_mapbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
-extern int drm_order(unsigned long size);
 
                                /* DMA support (drm_dma.h) */
 extern int drm_dma_setup(struct drm_device *dev);
@@ -1466,6 +1454,8 @@ extern int drm_modeset_ctl(struct drm_device *dev, void *data,
 
                                /* AGP/GART support (drm_agpsupport.h) */
 extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+extern void drm_agp_destroy(struct drm_agp_head *agp);
+extern void drm_agp_clear(struct drm_device *dev);
 extern int drm_agp_acquire(struct drm_device *dev);
 extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);
@@ -1561,6 +1551,9 @@ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **
 extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
 extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
 
+int drm_gem_dumb_destroy(struct drm_file *file,
+                        struct drm_device *dev,
+                        uint32_t handle);
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
@@ -1577,9 +1570,8 @@ extern int drm_vma_info(struct seq_file *m, void *data);
 
                                /* Scatter Gather Support (drm_scatter.h) */
 extern void drm_sg_cleanup(struct drm_sg_mem * entry);
-extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+extern int drm_sg_alloc(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
-extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
 extern int drm_sg_free(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
@@ -1613,8 +1605,8 @@ struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                            size_t size);
 int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size);
-int drm_gem_private_object_init(struct drm_device *dev,
-                       struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_init(struct drm_device *dev,
+                                struct drm_gem_object *obj, size_t size);
 void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
@@ -1640,10 +1632,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-       if (obj != NULL) {
+       if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
                struct drm_device *dev = obj->dev;
+
                mutex_lock(&dev->struct_mutex);
-               kref_put(&obj->refcount, drm_gem_object_free);
+               if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
+                       drm_gem_object_free(&obj->refcount);
                mutex_unlock(&dev->struct_mutex);
        }
 }
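
The rewritten unlocked unreference above is a lockless fast path: atomic_add_unless() drops the refcount outright unless it is currently 1, so the common case never touches struct_mutex; a potential final reference falls through to the locked slow path, which rechecks with atomic_dec_and_test() before freeing. The same idiom in generic form, as a hedged sketch (kref's embedded atomic_t is as in kernels of this vintage; the function and parameters are illustrative):

    #include <linux/kref.h>
    #include <linux/mutex.h>

    static void example_put(struct kref *ref, struct mutex *lock,
                            void (*release)(struct kref *ref))
    {
            /* Fast path: succeeds iff the count was >= 2, no lock taken. */
            if (atomic_add_unless(&ref->refcount, -1, 1))
                    return;

            /* Slow path: may be the last reference; recheck under lock. */
            mutex_lock(lock);
            if (atomic_dec_and_test(&ref->refcount))
                    release(ref);
            mutex_unlock(lock);
    }
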
@@ -1661,24 +1655,6 @@ drm_gem_object_handle_reference(struct drm_gem_object *obj)
 }
 
 static inline void
-drm_gem_object_handle_unreference(struct drm_gem_object *obj)
-{
-       if (obj == NULL)
-               return;
-
-       if (atomic_read(&obj->handle_count) == 0)
-               return;
-       /*
-        * Must bump handle count first as this may be the last
-        * ref, in which case the object would disappear before we
-        * checked for a name
-        */
-       if (atomic_dec_and_test(&obj->handle_count))
-               drm_gem_object_handle_free(obj);
-       drm_gem_object_unreference(obj);
-}
-
-static inline void
 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 {
        if (obj == NULL)
index c34f27f..89b4d7d 100644
@@ -30,14 +30,6 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
 
-/*
- * destroy memory region allocated.
- *     - a gem handle and physical memory region pointed by a gem object
- *     would be released by drm_gem_handle_delete().
- */
-int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
-               struct drm_device *drm, unsigned int handle);
-
 /* allocate physical memory. */
 struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
                unsigned int size);
index b87d05e..cba6786 100644
 /*
  * Generic range manager structs
  */
+#include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/spinlock.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/seq_file.h>
 #endif
 
+enum drm_mm_search_flags {
+       DRM_MM_SEARCH_DEFAULT =         0,
+       DRM_MM_SEARCH_BEST =            1 << 0,
+};
+
 struct drm_mm_node {
        struct list_head node_list;
        struct list_head hole_stack;
@@ -62,9 +70,6 @@ struct drm_mm {
        /* head_node.node_list is the list of all memory nodes, ordered
         * according to the (increasing) start address of the memory node. */
        struct drm_mm_node head_node;
-       struct list_head unused_nodes;
-       int num_unused;
-       spinlock_t unused_lock;
        unsigned int scan_check_range : 1;
        unsigned scan_alignment;
        unsigned long scan_color;
@@ -115,13 +120,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
                                                &(mm)->head_node.node_list, \
                                                node_list)
-#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
-       for (entry = (mm)->prev_scanned_node, \
-               next = entry ? list_entry(entry->node_list.next, \
-                       struct drm_mm_node, node_list) : NULL; \
-            entry != NULL; entry = next, \
-               next = entry ? list_entry(entry->node_list.next, \
-                       struct drm_mm_node, node_list) : NULL) \
 
 /* Note that we need to unroll list_for_each_entry in order to inline
  * setting hole_start and hole_end on each iteration and keep the
@@ -139,121 +137,49 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
  * Basic range manager support (drm_mm.c)
  */
 extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
-extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
-                                                   unsigned long size,
-                                                   unsigned alignment,
-                                                   unsigned long color,
-                                                   int atomic);
-extern struct drm_mm_node *drm_mm_get_block_range_generic(
-                                               struct drm_mm_node *node,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long color,
-                                               unsigned long start,
-                                               unsigned long end,
-                                               int atomic);
-
-static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
-                                                  unsigned long size,
-                                                  unsigned alignment)
-{
-       return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
-                                                         unsigned long size,
-                                                         unsigned alignment)
-{
-       return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
-}
-static inline struct drm_mm_node *drm_mm_get_block_range(
-                                               struct drm_mm_node *parent,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long start,
-                                               unsigned long end)
-{
-       return drm_mm_get_block_range_generic(parent, size, alignment, 0,
-                                             start, end, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
-                                               struct drm_mm_node *parent,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long start,
-                                               unsigned long end)
-{
-       return drm_mm_get_block_range_generic(parent, size, alignment, 0,
-                                               start, end, 1);
-}
 
-extern int drm_mm_insert_node(struct drm_mm *mm,
-                             struct drm_mm_node *node,
-                             unsigned long size,
-                             unsigned alignment);
-extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
-                                      struct drm_mm_node *node,
-                                      unsigned long size,
-                                      unsigned alignment,
-                                      unsigned long start,
-                                      unsigned long end);
 extern int drm_mm_insert_node_generic(struct drm_mm *mm,
                                      struct drm_mm_node *node,
                                      unsigned long size,
                                      unsigned alignment,
-                                     unsigned long color);
+                                     unsigned long color,
+                                     enum drm_mm_search_flags flags);
+static inline int drm_mm_insert_node(struct drm_mm *mm,
+                                    struct drm_mm_node *node,
+                                    unsigned long size,
+                                    unsigned alignment,
+                                    enum drm_mm_search_flags flags)
+{
+       return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
+}
+
 extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
                                       struct drm_mm_node *node,
                                       unsigned long size,
                                       unsigned alignment,
                                       unsigned long color,
                                       unsigned long start,
-                                      unsigned long end);
-extern void drm_mm_put_block(struct drm_mm_node *cur);
-extern void drm_mm_remove_node(struct drm_mm_node *node);
-extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-                                                     unsigned long size,
-                                                     unsigned alignment,
-                                                     unsigned long color,
-                                                     bool best_match);
-extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
-                                               const struct drm_mm *mm,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long color,
-                                               unsigned long start,
-                                               unsigned long end,
-                                               bool best_match);
-static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
-                                                    unsigned long size,
-                                                    unsigned alignment,
-                                                    bool best_match)
+                                      unsigned long end,
+                                      enum drm_mm_search_flags flags);
+static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
+                                             struct drm_mm_node *node,
+                                             unsigned long size,
+                                             unsigned alignment,
+                                             unsigned long start,
+                                             unsigned long end,
+                                             enum drm_mm_search_flags flags)
 {
-       return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
-}
-static inline  struct drm_mm_node *drm_mm_search_free_in_range(
-                                               const struct drm_mm *mm,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long start,
-                                               unsigned long end,
-                                               bool best_match)
-{
-       return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
-                                                  start, end, best_match);
+       return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
+                                                  0, start, end, flags);
 }
 
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 extern void drm_mm_init(struct drm_mm *mm,
                        unsigned long start,
                        unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
 extern int drm_mm_clean(struct drm_mm *mm);
-extern int drm_mm_pre_get(struct drm_mm *mm);
-
-static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
-{
-       return block->mm;
-}
 
 void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
new file mode 100644
index 0000000..22eedac
--- /dev/null
+++ b/include/drm/drm_vma_manager.h
@@ -0,0 +1,224 @@
+#ifndef __DRM_VMA_MANAGER_H__
+#define __DRM_VMA_MANAGER_H__
+
+/*
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mm.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct drm_vma_offset_node {
+       struct drm_mm_node vm_node;
+       struct rb_node vm_rb;
+};
+
+struct drm_vma_offset_manager {
+       rwlock_t vm_lock;
+       struct rb_root vm_addr_space_rb;
+       struct drm_mm vm_addr_space_mm;
+};
+
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+                                unsigned long page_offset, unsigned long size);
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
+
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+                                                 unsigned long start,
+                                                 unsigned long pages);
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+                                                          unsigned long start,
+                                                          unsigned long pages);
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+                      struct drm_vma_offset_node *node, unsigned long pages);
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+                          struct drm_vma_offset_node *node);
+
+/**
+ * drm_vma_offset_exact_lookup() - Look up node by exact address
+ * @mgr: Manager object
+ * @start: Start address (page-based, not byte-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
+ * It only returns the exact object with the given start address.
+ *
+ * RETURNS:
+ * Node at exact start address @start.
+ */
+static inline struct drm_vma_offset_node *
+drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
+                           unsigned long start,
+                           unsigned long pages)
+{
+       struct drm_vma_offset_node *node;
+
+       node = drm_vma_offset_lookup(mgr, start, pages);
+       return (node && node->vm_node.start == start) ? node : NULL;
+}
+
+/**
+ * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
+ * are allowed while holding this lock. All other contexts are blocked from the
+ * VMA manager until the lock is released via drm_vma_offset_unlock_lookup().
+ *
+ * Use this if you need to take a reference to the objects returned by
+ * drm_vma_offset_lookup_locked() before releasing this lock again.
+ *
+ * This lock must not be used for anything other than extended lookups. You must
+ * not call any other VMA helpers while holding this lock.
+ *
+ * Note: You're in atomic context while holding this lock!
+ *
+ * Example:
+ *   struct example_obj *obj = NULL;
+ *
+ *   drm_vma_offset_lock_lookup(mgr);
+ *   node = drm_vma_offset_lookup_locked(mgr, start, pages);
+ *   if (node)
+ *       obj = container_of(node, struct example_obj, vma_node);
+ *   if (obj && !kref_get_unless_zero(&obj->kref))
+ *       obj = NULL;
+ *   drm_vma_offset_unlock_lookup(mgr);
+ */
+static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
+{
+       read_lock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
+ */
+static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
+{
+       read_unlock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_node_reset() - Initialize or reset node object
+ * @node: Node to initialize or reset
+ *
+ * Reset a node to its initial state. This must be called if @node isn't
+ * already cleared (e.g., via kzalloc) before using it with any VMA offset
+ * manager.
+ *
+ * This must not be called on a node that still holds an offset allocation, or
+ * that allocation will be leaked.
+ */
+static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
+{
+       memset(node, 0, sizeof(*node));
+}
+
+/**
+ * drm_vma_node_start() - Return start address for page-based addressing
+ * @node: Node to inspect
+ *
+ * Return the start address of the given node. This can be used as offset into
+ * the linear VM space that is provided by the VMA offset manager. Note that
+ * this can only be used for page-based addressing. If you need a proper offset
+ * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
+ * drm_vma_node_offset_addr() helper instead.
+ *
+ * RETURNS:
+ * Start address of @node for page-based addressing. 0 if the node does not
+ * have an offset allocated.
+ */
+static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
+{
+       return node->vm_node.start;
+}
+
+/**
+ * drm_vma_node_size() - Return size (page-based)
+ * @node: Node to inspect
+ *
+ * Return the size as number of pages for the given node. This is the same size
+ * that was passed to drm_vma_offset_add(). If no offset is allocated for the
+ * node, this is 0.
+ *
+ * RETURNS:
+ * Size of @node as number of pages. 0 if the node does not have an offset
+ * allocated.
+ */
+static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
+{
+       return node->vm_node.size;
+}
+
+/**
+ * drm_vma_node_has_offset() - Check whether node is added to offset manager
+ * @node: Node to be checked
+ *
+ * RETURNS:
+ * true iff the node was previously allocated an offset and added to
+ * a VMA offset manager.
+ */
+static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
+{
+       return drm_mm_node_allocated(&node->vm_node);
+}
+
+/**
+ * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
+ * @node: Linked offset node
+ *
+ * Same as drm_vma_node_start() but returns the address as a valid offset that
+ * can be used for user-space mappings during mmap().
+ * This must not be called on unlinked nodes.
+ *
+ * RETURNS:
+ * Offset of @node for byte-based addressing. 0 if the node does not have an
+ * offset allocated.
+ */
+static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+       return ((__u64)node->vm_node.start) << PAGE_SHIFT;
+}
+
+/**
+ * drm_vma_node_unmap() - Unmap offset node
+ * @node: Offset node
+ * @file_mapping: Address space to unmap @node from
+ *
+ * Unmap all userspace mappings for a given offset node. The mappings must be
+ * associated with the @file_mapping address-space. If no offset exists or
+ * the address-space is invalid, nothing is done.
+ *
+ * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
+ * is not called on this node concurrently.
+ */
+static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
+                                     struct address_space *file_mapping)
+{
+       if (file_mapping && drm_vma_node_has_offset(node))
+               unmap_mapping_range(file_mapping,
+                                   drm_vma_node_offset_addr(node),
+                                   drm_vma_node_size(node) << PAGE_SHIFT, 1);
+}
+
+#endif /* __DRM_VMA_MANAGER_H__ */
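
Pulling the new header together, the driver-side lifecycle is short: reset the embedded node, add it to the manager to reserve a fake-offset range, hand the byte offset to user space for mmap(), and remove the node on teardown. A usage sketch under assumed names (struct example_bo, its fields, and the helper names are hypothetical; the offset window mirrors the ttm_bo_device_init hunk earlier, and none of this is literal patch code):

    #include <drm/drm_vma_manager.h>

    struct example_bo {
            struct drm_vma_offset_node vma_node;
            unsigned long num_pages;
    };

    static struct drm_vma_offset_manager mgr;  /* normally one per device */

    static void example_dev_init(void)
    {
            /* Page offset and window size as in the TTM conversion above. */
            drm_vma_offset_manager_init(&mgr, 0x100000, 0x10000000);
    }

    static int example_bo_setup(struct example_bo *bo)
    {
            drm_vma_node_reset(&bo->vma_node);
            return drm_vma_offset_add(&mgr, &bo->vma_node, bo->num_pages);
    }

    static u64 example_bo_mmap_offset(struct example_bo *bo)
    {
            /* Byte-based offset that user space passes to mmap(). */
            return drm_vma_node_offset_addr(&bo->vma_node);
    }

    static void example_bo_release(struct example_bo *bo)
    {
            drm_vma_offset_remove(&mgr, &bo->vma_node);
    }

The locked-lookup half of the API is exercised by the ttm_bo_vm_lookup() hunk earlier in this diff.
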
index 8a6aa56..751eaff 100644
 #define _TTM_BO_API_H_
 
 #include <drm/drm_hashtab.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
-#include <linux/rbtree.h>
 #include <linux/bitmap.h>
 #include <linux/reservation.h>
 
@@ -145,7 +145,6 @@ struct ttm_tt;
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
- * @addr_space_offset: Address space offset.
  * @acc_size: Accounted size for this object.
  * @kref: Reference count of this buffer object. When this refcount reaches
  * zero, the object is put on the delayed delete list.
@@ -166,8 +165,7 @@ struct ttm_tt;
  * @swap: List head for swap LRU list.
  * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
- * @vm_rb: Rb node for the vm rb tree.
- * @vm_node: Address space manager node.
+ * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -194,7 +192,6 @@ struct ttm_buffer_object {
        enum ttm_bo_type type;
        void (*destroy) (struct ttm_buffer_object *);
        unsigned long num_pages;
-       uint64_t addr_space_offset;
        size_t acc_size;
 
        /**
@@ -238,13 +235,7 @@ struct ttm_buffer_object {
        void *sync_obj;
        unsigned long priv_flags;
 
-       /**
-        * Members protected by the bdev::vm_lock
-        */
-
-       struct rb_node vm_rb;
-       struct drm_mm_node *vm_node;
-
+       struct drm_vma_offset_node vma_node;
 
        /**
         * Special members that are protected by the reserve lock
index 984fc2d..8639c85 100644
@@ -36,6 +36,7 @@
 #include <ttm/ttm_placement.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_global.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/workqueue.h>
 #include <linux/fs.h>
 #include <linux/spinlock.h>
@@ -519,7 +520,7 @@ struct ttm_bo_global {
  * @man: An array of mem_type_managers.
  * @fence_lock: Protects the synchronizing members on *all* bos belonging
  * to this device.
- * @addr_space_mm: Range manager for the device address space.
+ * @vma_manager: Address space manager
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
  * @val_seq: Current validation sequence.
@@ -537,14 +538,13 @@ struct ttm_bo_device {
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
-       rwlock_t vm_lock;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        spinlock_t fence_lock;
+
        /*
-        * Protected by the vm lock.
+        * Protected by internal locks.
         */
-       struct rb_root addr_space_rb;
-       struct drm_mm addr_space_mm;
+       struct drm_vma_offset_manager vma_manager;
 
        /*
         * Protected by the global:lru lock.
index 238a166..272580c 100644
@@ -181,7 +181,7 @@ enum drm_map_type {
        _DRM_AGP = 3,             /**< AGP/GART */
        _DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
        _DRM_CONSISTENT = 5,      /**< Consistent memory for PCI DMA */
-       _DRM_GEM = 6,             /**< GEM object */
+       _DRM_GEM = 6,             /**< GEM object (obsolete) */
 };
 
 /**