radeon: FEDORA: add old DMA buffers on top of GEM
author    Dave Airlie <airlied@redhat.com>  Wed, 13 Aug 2008 23:10:11 +0000 (09:10 +1000)
committer Dave Airlie <airlied@redhat.com>  Wed, 13 Aug 2008 23:10:11 +0000 (09:10 +1000)
This really shouldn't go upstream; it just lets me run the old
3D driver on a GEM-enabled system.
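
For reference, what this keeps working is the classic libdrm DMA
buffer interface. A minimal userspace sketch of the path the old 3D
driver depends on (against xf86drm.h; the device path and error
handling are illustrative, not part of this patch):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <xf86drm.h>

    int main(void)
    {
            /* Any radeon node exposing the legacy buffer ioctls. */
            int fd = open("/dev/dri/card0", O_RDWR);
            drmBufMapPtr bufs;

            if (fd < 0)
                    return 1;

            /* drm_mapbufs() on the kernel side: with this patch the
             * buffers come out of a GEM/TTM bo, not an AGP aperture. */
            bufs = drmMapBufs(fd);
            if (!bufs) {
                    fprintf(stderr, "no DMA buffers mapped\n");
                    close(fd);
                    return 1;
            }
            printf("mapped %d legacy DMA buffers\n", bufs->count);

            drmUnmapBufs(bufs);
            close(fd);
            return 0;
    }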

linux-core/drm_bufs.c
linux-core/drm_dma.c
linux-core/drm_drv.c
linux-core/drm_fops.c
linux-core/drm_memory.c
linux-core/radeon_gem.c
shared-core/radeon_drv.h

diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index e905257..c966bad 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -1528,6 +1528,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);
 
+       DRM_DEBUG("dma buf count %d, req count %d\n", request->count, dma->buf_count);
        if (request->count >= dma->buf_count) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
@@ -1538,10 +1539,12 @@ int drm_mapbufs(struct drm_device *dev, void *data,
                        unsigned long token = dev->agp_buffer_token;
 
                        if (!map) {
+                               DRM_DEBUG("No map\n");
                                retcode = -EINVAL;
                                goto done;
                        }
                        down_write(&current->mm->mmap_sem);
+                       DRM_DEBUG("%x %d\n", token, map->size);
                        virtual = do_mmap(file_priv->filp, 0, map->size,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED,
@@ -1555,6 +1558,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
                        up_write(&current->mm->mmap_sem);
                }
                if (virtual > -1024UL) {
+                       DRM_DEBUG("mmap failed\n");
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c
index f7bff0a..1e6aeef 100644
--- a/linux-core/drm_dma.c
+++ b/linux-core/drm_dma.c
@@ -58,6 +58,7 @@ int drm_dma_setup(struct drm_device *dev)
 
        return 0;
 }
+EXPORT_SYMBOL(drm_dma_setup);
 
 /**
  * Cleanup the DMA resources.
@@ -120,6 +121,7 @@ void drm_dma_takedown(struct drm_device *dev)
        drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
        dev->dma = NULL;
 }
+EXPORT_SYMBOL(drm_dma_takedown);
 
 /**
  * Free a buffer.
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 2a6bebd..7c43fd0 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -250,7 +250,7 @@ int drm_lastclose(struct drm_device * dev)
        }
        dev->queue_count = 0;
 
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET))
                drm_dma_takedown(dev);
 
        dev->dev_mapping = NULL;
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index f45d5e4..7bc73d2 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -54,10 +54,12 @@ static int drm_setup(struct drm_device * dev)
 
        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
-       dev->buf_use = 0;
-       atomic_set(&dev->buf_alloc, 0);
 
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
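+       /* With modesetting, radeon's GEM code does its own DMA buffer setup. */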
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               dev->buf_use = 0;
+               atomic_set(&dev->buf_alloc, 0);
+
                i = drm_dma_setup(dev);
                if (i < 0)
                        return i;
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 4b494f9..a663e96 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -188,6 +188,7 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
        }
        return pt;
 }
+EXPORT_SYMBOL(drm_realloc);
 
 /**
  * Allocate pages.
diff --git a/linux-core/radeon_gem.c b/linux-core/radeon_gem.c
index 1ee4851..250e685 100644
--- a/linux-core/radeon_gem.c
+++ b/linux-core/radeon_gem.c
@@ -29,6 +29,8 @@
 
 static int radeon_gem_ib_init(struct drm_device *dev);
 static int radeon_gem_ib_destroy(struct drm_device *dev);
+static int radeon_gem_dma_bufs_init(struct drm_device *dev);
+static void radeon_gem_dma_bufs_destroy(struct drm_device *dev);
 
 int radeon_gem_init_object(struct drm_gem_object *obj)
 {
@@ -608,6 +610,7 @@ int radeon_alloc_gart_objects(struct drm_device *dev)
 
        /* init the indirect buffers */
        radeon_gem_ib_init(dev);
+       radeon_gem_dma_bufs_init(dev);
        return 0;                         
 
 }
@@ -650,6 +653,7 @@ void radeon_gem_mm_fini(struct drm_device *dev)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
 
+       radeon_gem_dma_bufs_destroy(dev);
        radeon_gem_ib_destroy(dev);
 
        mutex_lock(&dev->struct_mutex);
@@ -878,3 +882,252 @@ free_all:
        return -ENOMEM;
 }
 
+#define RADEON_DMA_BUFFER_SIZE (64 * 1024)
+#define RADEON_DMA_BUFFER_COUNT (16)
+
+
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+                                 struct drm_buf_entry * entry)
+{
+       int i;
+
+       if (entry->seg_count) {
+               for (i = 0; i < entry->seg_count; i++) {
+                       if (entry->seglist[i]) {
+                               drm_pci_free(dev, entry->seglist[i]);
+                       }
+               }
+               drm_free(entry->seglist,
+                        entry->seg_count *
+                        sizeof(*entry->seglist), DRM_MEM_SEGS);
+
+               entry->seg_count = 0;
+       }
+
+       if (entry->buf_count) {
+               for (i = 0; i < entry->buf_count; i++) {
+                       if (entry->buflist[i].dev_private) {
+                               drm_free(entry->buflist[i].dev_private,
+                                        entry->buflist[i].dev_priv_size,
+                                        DRM_MEM_BUFS);
+                       }
+               }
+               drm_free(entry->buflist,
+                        entry->buf_count *
+                        sizeof(*entry->buflist), DRM_MEM_BUFS);
+
+               entry->buf_count = 0;
+       }
+}
+
+static int radeon_gem_addbufs(struct drm_device *dev)
+{
+       struct drm_radeon_private *dev_priv = dev->dev_private;
+       struct drm_device_dma *dma = dev->dma;
+       struct drm_buf_entry *entry;
+       struct drm_buf *buf;
+       unsigned long offset;
+       unsigned long agp_offset;
+       int count;
+       int order;
+       int size;
+       int alignment;
+       int page_order;
+       int total;
+       int byte_count;
+       int i;
+       struct drm_buf **temp_buflist;
+
+       if (!dma)
+               return -EINVAL;
+
+       count = RADEON_DMA_BUFFER_COUNT;
+       order = drm_order(RADEON_DMA_BUFFER_SIZE);
+       size = 1 << order;
+
+       alignment = PAGE_ALIGN(size);
+       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+       total = PAGE_SIZE << page_order;
+
+       byte_count = 0;
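+       /* The pinned GEM/TTM bo backing the bufs stands in for an AGP
+        * aperture: its GART offset is the base address of every buffer. */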
+       agp_offset = dev_priv->mm.dma_bufs.bo->offset;
+
+       DRM_DEBUG("count:      %d\n", count);
+       DRM_DEBUG("order:      %d\n", order);
+       DRM_DEBUG("size:       %d\n", size);
+       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("alignment:  %d\n", alignment);
+       DRM_DEBUG("page_order: %d\n", page_order);
+       DRM_DEBUG("total:      %d\n", total);
+
+       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+               return -EINVAL;
+       if (dev->queue_count)
+               return -EBUSY;  /* Not while in use */
+
+       spin_lock(&dev->count_lock);
+       if (dev->buf_use) {
+               spin_unlock(&dev->count_lock);
+               return -EBUSY;
+       }
+       atomic_inc(&dev->buf_alloc);
+       spin_unlock(&dev->count_lock);
+
+       mutex_lock(&dev->struct_mutex);
+       entry = &dma->bufs[order];
+       if (entry->buf_count) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM; /* May only call once for each order */
+       }
+
+       if (count < 0 || count > 4096) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -EINVAL;
+       }
+
+       entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+                                  DRM_MEM_BUFS);
+       if (!entry->buflist) {
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+       entry->buf_size = size;
+       entry->page_order = page_order;
+
+       offset = 0;
+
+       while (entry->buf_count < count) {
+               buf = &entry->buflist[entry->buf_count];
+               buf->idx = dma->buf_count + entry->buf_count;
+               buf->total = alignment;
+               buf->order = order;
+               buf->used = 0;
+
+               buf->offset = (dma->byte_count + offset);
+               buf->bus_address = dev_priv->gart_vm_start + agp_offset + offset;
+               buf->address = (void *)(agp_offset + offset);
+               buf->next = NULL;
+               buf->waiting = 0;
+               buf->pending = 0;
+               init_waitqueue_head(&buf->dma_wait);
+               buf->file_priv = NULL;
+
+               buf->dev_priv_size = dev->driver->dev_priv_size;
+               buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+               if (!buf->dev_private) {
+                       /* Set count correctly so we free the proper amount. */
+                       entry->buf_count = count;
+                       drm_cleanup_buf_error(dev, entry);
+                       mutex_unlock(&dev->struct_mutex);
+                       atomic_dec(&dev->buf_alloc);
+                       return -ENOMEM;
+               }
+
+               memset(buf->dev_private, 0, buf->dev_priv_size);
+
+               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+               offset += alignment;
+               entry->buf_count++;
+               byte_count += PAGE_SIZE << page_order;
+       }
+
+       DRM_DEBUG("byte_count: %d\n", byte_count);
+
+       temp_buflist = drm_realloc(dma->buflist,
+                                  dma->buf_count * sizeof(*dma->buflist),
+                                  (dma->buf_count + entry->buf_count)
+                                  * sizeof(*dma->buflist), DRM_MEM_BUFS);
+       if (!temp_buflist) {
+               /* Free the entry because it isn't valid */
+               drm_cleanup_buf_error(dev, entry);
+               mutex_unlock(&dev->struct_mutex);
+               atomic_dec(&dev->buf_alloc);
+               return -ENOMEM;
+       }
+       dma->buflist = temp_buflist;
+
+       for (i = 0; i < entry->buf_count; i++) {
+               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+       }
+
+       dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
+       dma->byte_count += byte_count;
+
+       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+       mutex_unlock(&dev->struct_mutex);
+
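+       /* Claim the scatter/gather path so drm_mapbufs() hands userspace
+        * the single agp_buffer_map mapping set up at init time. */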
+       dma->flags = _DRM_DMA_USE_SG;
+       atomic_dec(&dev->buf_alloc);
+       return 0;
+}
+
+static int radeon_gem_dma_bufs_init(struct drm_device *dev)
+{
+       struct drm_radeon_private *dev_priv = dev->dev_private;
+       int size = RADEON_DMA_BUFFER_SIZE * RADEON_DMA_BUFFER_COUNT;
+       int ret;
+
+       ret = drm_dma_setup(dev);
+       if (ret < 0)
+               return ret;
+
+       ret = drm_buffer_object_create(dev, size, drm_bo_type_device,
+                                      DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_NO_EVICT |
+                                      DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE, 0,
+                                      0, 0, &dev_priv->mm.dma_bufs.bo);
+       if (ret) {
+               DRM_ERROR("Failed to create DMA bufs\n");
+               return -ENOMEM;
+       }
+
+       ret = drm_bo_kmap(dev_priv->mm.dma_bufs.bo, 0, size >> PAGE_SHIFT,
+                         &dev_priv->mm.dma_bufs.kmap);
+       if (ret) {
+               DRM_ERROR("Failed to mmap DMA buffers\n");
+               return -ENOMEM;
+       }
+       DRM_DEBUG("\n");
+       radeon_gem_addbufs(dev);
+
+       DRM_DEBUG("%x %d\n", dev_priv->mm.dma_bufs.bo->map_list.hash.key, size);
+       dev->agp_buffer_token = dev_priv->mm.dma_bufs.bo->map_list.hash.key << PAGE_SHIFT;
+       dev_priv->mm.fake_agp_map.handle = dev_priv->mm.dma_bufs.kmap.virtual;
+       dev_priv->mm.fake_agp_map.size = size;
+
+       dev->agp_buffer_map = &dev_priv->mm.fake_agp_map;
+
+       return 0;
+}
+
+static void radeon_gem_dma_bufs_destroy(struct drm_device *dev)
+{
+       struct drm_radeon_private *dev_priv = dev->dev_private;
+       drm_dma_takedown(dev);
+
+       drm_bo_kunmap(&dev_priv->mm.dma_bufs.kmap);
+       drm_bo_usage_deref_unlocked(&dev_priv->mm.dma_bufs.bo);
+}
diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h
index 51a5b00..138db79 100644
--- a/shared-core/radeon_drv.h
+++ b/shared-core/radeon_drv.h
@@ -277,6 +277,9 @@ struct radeon_mm_info {
        struct radeon_mm_obj pcie_table;
        struct radeon_mm_obj ring;
        struct radeon_mm_obj ring_read;
+
+       struct radeon_mm_obj dma_bufs;
+       struct drm_map fake_agp_map;
 };
 
 #include "radeon_mode.h"