User buffer support.

Add support for buffer objects backed by anonymous user memory
(drm_bo_type_user): pin the caller's pages with get_user_pages(),
back unpopulated pages of read-only buffers with a per-device dummy
read page, require user buffers to stay in cache-coherent memory,
make them non-shareable, and accept buffer starts that are not
page-aligned.
author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
           Fri, 2 Nov 2007 15:03:41 +0000 (16:03 +0100)
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
           Fri, 2 Nov 2007 15:05:25 +0000 (16:05 +0100)
libdrm/xf86drm.c
linux-core/drm_bo.c
linux-core/drm_objects.h
linux-core/drm_ttm.c

diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 5a4071b..165cb2f 100644
@@ -2612,9 +2612,9 @@ int drmBOCreate(int fd, unsigned long size,
     ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_CREATE, &arg);
     if (ret)
        return ret;
-    
+
     drmBOCopyReply(rep, buf);
-    buf->mapVirtual = NULL;
+    buf->virtual = user_buffer;
     buf->mapCount = 0;
 
     return 0;
@@ -2644,7 +2644,7 @@ int drmBOUnreference(int fd, drmBO *buf)
 {
     struct drm_bo_handle_arg arg;
 
-    if (buf->mapVirtual) {
+    if (buf->mapVirtual && buf->mapHandle) {
        (void) munmap(buf->mapVirtual, buf->start + buf->size);
        buf->mapVirtual = NULL;
        buf->virtual = NULL;
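
A minimal userspace sketch of what the two libdrm hunks above enable,
assuming the drmBOCreate() prototype, header names and flag names of
this era's libdrm (the helper below is illustrative, not part of the
patch). Once a user pointer is passed in, buf->virtual aliases the
caller's memory and no kernel map handle is created, so
drmBOUnreference() has nothing to munmap:

    #include <stddef.h>
    #include "xf86drm.h"
    #include "xf86mm.h"    /* drmBO API of this era (assumed) */

    /* Create a buffer object backed by caller-owned memory. */
    static int create_user_bo(int fd, void *mem, size_t size, drmBO *bo)
    {
            /* User buffers must request cache-coherent memory; see the
             * drm_bo_new_mask() hunk in linux-core/drm_bo.c below. */
            return drmBOCreate(fd, size, 0 /* page alignment */, mem,
                               DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                               DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
                               DRM_BO_FLAG_FORCE_CACHING,
                               0 /* hint */, bo);
    }
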
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index dc96e8a..ea680a1 100644
@@ -137,9 +137,9 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
 {
        struct drm_device *dev = bo->dev;
        int ret = 0;
-       bo->ttm = NULL;
 
        DRM_ASSERT_LOCKED(&bo->mutex);
+       bo->ttm = NULL;
 
        switch (bo->type) {
        case drm_bo_type_dc:
@@ -149,6 +149,18 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
                        ret = -ENOMEM;
                break;
        case drm_bo_type_user:
+               bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+               if (!bo->ttm)
+                       return -ENOMEM;
+
+               ret = drm_ttm_set_user(bo->ttm, current,
+                                      bo->mem.mask & DRM_BO_FLAG_WRITE,
+                                      bo->buffer_start,
+                                      bo->num_pages,
+                                      dev->bm.dummy_read_page);
+               if (ret)
+                       return ret;
+
                break;
        default:
                DRM_ERROR("Illegal buffer object type\n");
@@ -784,12 +796,15 @@ static int drm_bo_mem_force_space(struct drm_device * dev,
 }
 
 static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
+                               int disallow_fixed,
                                uint32_t mem_type,
                                uint64_t mask, uint32_t * res_mask)
 {
        uint64_t cur_flags = drm_bo_type_flags(mem_type);
        uint64_t flag_diff;
 
+       if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
+               return 0;
        if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
                cur_flags |= DRM_BO_FLAG_CACHED;
        if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
@@ -854,7 +869,9 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
                mem_type = prios[i];
                man = &bm->man[mem_type];
 
-               type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
+               type_ok = drm_bo_mt_compatible(man,
+                                              bo->type == drm_bo_type_user,
+                                              mem_type, mem->mask,
                                               &cur_flags);
 
                if (!type_ok)
@@ -903,7 +920,11 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
                if (!man->has_type)
                        continue;
 
-               if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
+               if (!drm_bo_mt_compatible(man,
+                                         bo->type == drm_bo_type_user,
+                                         mem_type,
+                                         mem->mask,
+                                         &cur_flags))
                        continue;
 
                ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
@@ -928,8 +949,10 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo,
 {
        uint32_t new_props;
 
-       if (bo->type == drm_bo_type_user) {
-               DRM_ERROR("User buffers are not supported yet.\n");
+       if (bo->type == drm_bo_type_user &&
+           ((used_mask & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
+            (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
+               DRM_ERROR("User buffers require cache-coherent memory.\n");
                return -EINVAL;
        }
 
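
The check above replaces the old blanket rejection of user buffers:
they are now accepted, but only with flags that keep them in
cache-coherent memory. An illustrative pair of masks, using flag names
from this tree:

    /* Accepted for drm_bo_type_user: cached, and caching enforced. */
    uint64_t ok  = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ |
                   DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;

    /* Rejected with -EINVAL: the object could become uncached. */
    uint64_t bad = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ;
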
@@ -1120,7 +1143,12 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
        rep->flags = bo->mem.flags;
        rep->size = bo->num_pages * PAGE_SIZE;
        rep->offset = bo->offset;
-       rep->arg_handle = bo->map_list.user_token;
+
+       if (bo->type == drm_bo_type_dc)
+               rep->arg_handle = bo->map_list.user_token;
+       else
+               rep->arg_handle = 0;
+
        rep->mask = bo->mem.mask;
        rep->buffer_start = bo->buffer_start;
        rep->fence_flags = bo->fence_type;
@@ -1619,10 +1647,7 @@ int drm_buffer_object_create(struct drm_device *dev,
        int ret = 0;
        unsigned long num_pages;
 
-       if (buffer_start & ~PAGE_MASK) {
-               DRM_ERROR("Invalid buffer object start.\n");
-               return -EINVAL;
-       }
+       size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                DRM_ERROR("Illegal buffer object size.\n");
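
Rather than rejecting an unaligned start, the kernel now rounds the
start down to a page boundary and grows the size by the intra-page
offset, so the pinned page range still covers the whole user buffer.
A worked example with PAGE_SIZE == 4096 and hypothetical values:

    /* Request: buffer_start = 0x10234, size = 0x2000. */
    size += buffer_start & ~PAGE_MASK;        /* 0x2000 + 0x234 = 0x2234 */
    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;      /* 3 pages   */
    /* Later in this function:
     * bo->buffer_start = buffer_start & PAGE_MASK = 0x10000. */
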
@@ -1648,23 +1673,20 @@ int drm_buffer_object_create(struct drm_device *dev,
        INIT_LIST_HEAD(&bo->vma_list);
 #endif
        bo->dev = dev;
-       if (buffer_start != 0)
-               bo->type = drm_bo_type_user;
-       else
-               bo->type = type;
+       bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.mem_type = DRM_BO_MEM_LOCAL;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
-       bo->buffer_start = buffer_start;
+       bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
-       bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | 
+       bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
                DRM_BO_FLAG_MAPPABLE;
        bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
                DRM_BO_FLAG_MAPPABLE;
        atomic_inc(&bm->count);
-       ret = drm_bo_new_mask(bo, mask, hint);
+       ret = drm_bo_new_mask(bo, mask, mask);
        if (ret)
                goto out_err;
 
@@ -1720,6 +1742,7 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
        struct drm_bo_create_req *req = &arg->d.req;
        struct drm_bo_info_rep *rep = &arg->d.rep;
        struct drm_buffer_object *entry;
+       enum drm_bo_type bo_type;
        int ret = 0;
 
        DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
@@ -1730,8 +1753,13 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
                return -EINVAL;
        }
 
+       bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
+
+       if (bo_type == drm_bo_type_user)
+               req->mask &= ~DRM_BO_FLAG_SHAREABLE;
+
        ret = drm_buffer_object_create(file_priv->head->dev,
-                                      req->size, drm_bo_type_dc, req->mask,
+                                      req->size, bo_type, req->mask,
                                       req->hint, req->page_alignment,
                                       req->buffer_start, &entry);
        if (ret)
@@ -2186,6 +2214,15 @@ int drm_bo_driver_finish(struct drm_device * dev)
                DRM_DEBUG("Unfenced list was clean\n");
        }
       out:
+
+       if (bm->dummy_read_page) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+               unlock_page(bm->dummy_read_page);
+#else
+               ClearPageReserved(bm->dummy_read_page);
+#endif
+               __free_page(bm->dummy_read_page);
+       }
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
@@ -2203,11 +2238,24 @@ int drm_bo_driver_init(struct drm_device * dev)
        struct drm_buffer_manager *bm = &dev->bm;
        int ret = -EINVAL;
 
+       bm->dummy_read_page = NULL;
        drm_bo_init_lock(&bm->bm_lock);
        mutex_lock(&dev->struct_mutex);
        if (!driver)
                goto out_unlock;
 
+       bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+       if (!bm->dummy_read_page) {
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+       SetPageLocked(bm->dummy_read_page);
+#else
+       SetPageReserved(bm->dummy_read_page);
+#endif
+
        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
@@ -2462,11 +2510,15 @@ void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
 
 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
 {
-       struct drm_map_list *list = &bo->map_list;
+       struct drm_map_list *list;
        drm_local_map_t *map;
        struct drm_device *dev = bo->dev;
 
        DRM_ASSERT_LOCKED(&dev->struct_mutex);
+       if (bo->type != drm_bo_type_dc)
+               return;
+
+       list = &bo->map_list;
        if (list->user_token) {
                drm_ht_remove_item(&dev->map_hash, &list->hash);
                list->user_token = 0;
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index cea811e..0a05e5f 100644
@@ -275,6 +275,8 @@ typedef struct drm_ttm_backend {
 } drm_ttm_backend_t;
 
 struct drm_ttm {
+       struct mm_struct *user_mm;
+       struct page *dummy_read_page;
        struct page **pages;
        uint32_t page_flags;
        unsigned long num_pages;
@@ -300,6 +302,12 @@ extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
 extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
 extern void drm_ttm_cache_flush(void);
 extern int drm_ttm_populate(struct drm_ttm * ttm);
+extern int drm_ttm_set_user(struct drm_ttm *ttm,
+                           struct task_struct *tsk,
+                           int write,
+                           unsigned long start,
+                           unsigned long num_pages,
+                           struct page *dummy_read_page);
 
 /*
  * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
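
A hedged sketch of the intended calling sequence for the new
drm_ttm_set_user() entry point; it mirrors the drm_bo_add_ttm() hunk
in linux-core/drm_bo.c, and the helper itself is illustrative:

    /* Build a ttm over num_pages of the calling task's address space. */
    static int ttm_from_user(struct drm_device *dev,
                             struct drm_buffer_object *bo, int write)
    {
            struct drm_ttm *ttm;
            int ret;

            ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
            if (!ttm)
                    return -ENOMEM;

            /* dummy_read_page backs holes in read-only buffers. */
            ret = drm_ttm_set_user(ttm, current, write, bo->buffer_start,
                                   bo->num_pages, dev->bm.dummy_read_page);
            if (ret) {
                    drm_destroy_ttm(ttm);
                    return ret;
            }

            bo->ttm = ttm;
            return 0;
    }
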
@@ -320,11 +328,15 @@ extern int drm_destroy_ttm(struct drm_ttm * ttm);
  * Page flags.
  */
 
-#define DRM_TTM_PAGE_UNCACHED 0x01
-#define DRM_TTM_PAGE_USED     0x02
-#define DRM_TTM_PAGE_BOUND    0x04
-#define DRM_TTM_PAGE_PRESENT  0x08
-#define DRM_TTM_PAGE_VMALLOC  0x10
+#define DRM_TTM_PAGE_UNCACHED   (1 << 0)
+#define DRM_TTM_PAGE_USED       (1 << 1)
+#define DRM_TTM_PAGE_BOUND      (1 << 2)
+#define DRM_TTM_PAGE_PRESENT    (1 << 3)
+#define DRM_TTM_PAGE_VMALLOC    (1 << 4)
+#define DRM_TTM_PAGE_USER       (1 << 5)
+#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
+#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
+#define DRM_TTM_PAGE_USER_DMA   (1 << 8)
 
 /***************************************************
  * Buffer objects. (drm_bo.c, drm_bo_move.c)
@@ -447,6 +459,7 @@ struct drm_buffer_manager {
        uint32_t fence_type;
        unsigned long cur_pages;
        atomic_t count;
+       struct page *dummy_read_page;
 };
 
 struct drm_bo_driver {
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index fd03f6e..4d51f9f 100644
@@ -139,15 +139,74 @@ static int drm_set_caching(struct drm_ttm * ttm, int noncached)
        return 0;
 }
 
+
+static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
+{
+       struct mm_struct *mm = ttm->user_mm;
+       int write;
+       int dirty;
+       struct page *page;
+       int i;
+
+       BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
+       write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
+       dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
+
+       down_read(&mm->mmap_sem);
+       for (i = 0; i < ttm->num_pages; ++i) {
+               page = ttm->pages[i];
+               if (page == NULL)
+                       continue;
+
+               if (page == ttm->dummy_read_page) {
+                       BUG_ON(write);
+                       continue;
+               }
+
+               if (write && dirty && !PageReserved(page))
+                       SetPageDirty(page);
+
+               ttm->pages[i] = NULL;
+               page_cache_release(page);
+       }
+       up_read(&mm->mmap_sem);
+}
+
+static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
+{
+       int i;
+       struct drm_buffer_manager *bm = &ttm->dev->bm;
+       struct page **cur_page;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               cur_page = ttm->pages + i;
+               if (*cur_page) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+                       unlock_page(*cur_page);
+#else
+                       ClearPageReserved(*cur_page);
+#endif
+                       if (page_count(*cur_page) != 1) {
+                               DRM_ERROR("Erroneous page count. "
+                                         "Leaking pages.\n");
+                       }
+                       if (page_mapped(*cur_page)) {
+                               DRM_ERROR("Erroneous map count. "
+                                         "Leaking page mappings.\n");
+                       }
+                       __free_page(*cur_page);
+                       drm_free_memctl(PAGE_SIZE);
+                       --bm->cur_pages;
+               }
+       }
+}
+
 /*
  * Free all resources associated with a ttm.
  */
 
 int drm_destroy_ttm(struct drm_ttm * ttm)
 {
-
-       int i;
-       struct page **cur_page;
        struct drm_ttm_backend *be;
 
        if (!ttm)
@@ -160,31 +219,14 @@ int drm_destroy_ttm(struct drm_ttm * ttm)
        }
 
        if (ttm->pages) {
-               struct drm_buffer_manager *bm = &ttm->dev->bm;
                if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
                        drm_set_caching(ttm, 0);
 
-               for (i = 0; i < ttm->num_pages; ++i) {
-                       cur_page = ttm->pages + i;
-                       if (*cur_page) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
-                               unlock_page(*cur_page);
-#else
-                               ClearPageReserved(*cur_page);
-#endif
-                               if (page_count(*cur_page) != 1) {
-                                       DRM_ERROR("Erroneous page count. "
-                                                 "Leaking pages.\n");
-                               }
-                               if (page_mapped(*cur_page)) {
-                                       DRM_ERROR("Erroneous map count. "
-                                                 "Leaking page mappings.\n");
-                               }
-                               __free_page(*cur_page);
-                               drm_free_memctl(PAGE_SIZE);
-                               --bm->cur_pages;
-                       }
-               }
+               if (ttm->page_flags & DRM_TTM_PAGE_USER)
+                       drm_ttm_free_user_pages(ttm);
+               else
+                       drm_ttm_free_alloced_pages(ttm);
+
                ttm_free_pages(ttm);
        }
 
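
In the split above, drm_ttm_free_user_pages() carries the new rule for
pages pinned from a process: pages a bound, writable ttm may have
touched are flagged dirty before the get_user_pages() reference is
dropped, and the shared dummy read page is skipped because it was
never pinned. A comment-style sketch of the invariant, not new code:

    /* For each ttm->pages[i] of a DRM_TTM_PAGE_USER ttm:
     *   page == dummy_read_page      -> skip; it was never pinned,
     *                                   and BUG_ON(write) guards it
     *   write && dirty && !reserved  -> SetPageDirty(page) first
     *   always                       -> page_cache_release(page),
     *                                   undoing get_user_pages()
     */
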
@@ -209,6 +251,43 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
 }
 EXPORT_SYMBOL(drm_ttm_get_page);
 
+
+int drm_ttm_set_user(struct drm_ttm *ttm,
+                    struct task_struct *tsk,
+                    int write,
+                    unsigned long start,
+                    unsigned long num_pages,
+                    struct page *dummy_read_page)
+{
+       struct mm_struct *mm = tsk->mm;
+       int ret;
+       int i;
+
+       BUG_ON(num_pages != ttm->num_pages);
+
+       ttm->user_mm = mm;
+       ttm->dummy_read_page = dummy_read_page;
+       ttm->page_flags = DRM_TTM_PAGE_USER |
+               ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
+
+       down_read(&mm->mmap_sem);
+       ret = get_user_pages(tsk, mm, start, num_pages,
+                            write, 0, ttm->pages, NULL);
+       up_read(&mm->mmap_sem);
+
+       if (ret != num_pages && write) {
+               drm_ttm_free_user_pages(ttm);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_pages; ++i) {
+               if (ttm->pages[i] == NULL)
+                       ttm->pages[i] = ttm->dummy_read_page;
+       }
+
+       return 0;
+}
+
 int drm_ttm_populate(struct drm_ttm * ttm)
 {
        struct page *page;
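
A note on the failure semantics of drm_ttm_set_user() above:
get_user_pages() may pin fewer pages than requested. For a writable
buffer that is fatal, since the GPU could otherwise scribble on the
shared dummy page; for a read-only buffer every hole is instead backed
by the zeroed dummy_read_page, so device reads stay well defined:

    /* write != 0: ret != num_pages -> release pinned pages, -ENOMEM
     * write == 0: pages[i] == NULL -> pages[i] = dummy_read_page
     *             (device reads return zeroes; the free path skips
     *             the dummy page) */
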
@@ -340,7 +425,8 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
        }
 
        ttm->state = ttm_bound;
-
+       if (ttm->page_flags & DRM_TTM_PAGE_USER)
+               ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
        return 0;
 }