Simplify external ttm page allocation.
authorThomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Thu, 8 Feb 2007 12:29:08 +0000 (13:29 +0100)
committerThomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Thu, 8 Feb 2007 12:29:08 +0000 (13:29 +0100)
Implement a memcpy fallback for copying between buffers.

linux-core/drmP.h
linux-core/drm_bo.c
linux-core/drm_bo_move.c
linux-core/drm_compat.c
linux-core/drm_ttm.c
linux-core/drm_ttm.h
linux-core/drm_vm.c

index d3a9a2a..aff10b6 100644 (file)
@@ -1522,7 +1522,11 @@ extern int drm_bo_move_ttm(drm_device_t *dev,
                           int no_wait,
                           drm_bo_mem_reg_t *old_mem,
                           drm_bo_mem_reg_t *new_mem);
-
+extern int drm_bo_move_memcpy(drm_device_t *dev, 
+                             drm_ttm_t *ttm, int evict,
+                             int no_wait,
+                             drm_bo_mem_reg_t *old_mem,
+                             drm_bo_mem_reg_t *new_mem);
 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
 
index f4147be..845db3f 100644 (file)
@@ -180,13 +180,8 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t *bo,
                ret = dev->driver->bo_driver->move(dev, bo->ttm, evict, 
                                                   no_wait, &bo->mem, mem);
        } else {
-               ret = -EINVAL;
-               DRM_ERROR("Unsupported function\n");
-#if 0
                ret = drm_bo_move_memcpy(dev, bo->ttm, evict, no_wait, 
                                         &bo->mem, mem);
-               ret = 0;
-#endif
        }
 
        if (old_is_pci || new_is_pci)
@@ -2185,74 +2180,6 @@ int drm_bo_pci_offset(drm_device_t *dev,
 
 
 /**
- * \c Return a kernel virtual address to the buffer object PCI memory.
- *
- * \param bo The buffer object.
- * \return Failure indication.
- * 
- * Returns -EINVAL if the buffer object is currently not mappable.
- * Returns -ENOMEM if the ioremap operation failed.
- * Otherwise returns zero.
- * 
- * After a successfull call, bo->iomap contains the virtual address, or NULL
- * if the buffer object content is not accessible through PCI space. 
- * Call bo->mutex locked.
- */
-
-#if 0
-int drm_mem_reg_ioremap(drm_bo_mem_reg_t *mem)
-{
-       drm_device_t *dev = bo->dev;
-       drm_buffer_manager_t *bm = &dev->bm;
-       drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; 
-       unsigned long bus_offset;
-       unsigned long bus_size;
-       unsigned long bus_base;
-       int ret;
-
-       BUG_ON(bo->iomap);
-
-       ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size);
-       if (ret || bus_size == 0) 
-               return ret;
-
-       if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
-               bo->iomap = (void *) (((u8 *)man->io_addr) + bus_offset);
-       else {
-               bo->iomap = ioremap_nocache(bus_base + bus_offset, bus_size);
-               if (bo->iomap)
-                       return -ENOMEM;
-       }
-       
-       return 0;
-}
-
-/**
- * \c Unmap mapping obtained using drm_bo_ioremap
- *
- * \param bo The buffer object.
- *
- * Call bo->mutex locked.
- */
-
-void drm_bo_iounmap(drm_buffer_object_t *bo)
-{
-       drm_device_t *dev = bo->dev;
-       drm_buffer_manager_t *bm; 
-       drm_mem_type_manager_t *man; 
-
-
-       bm = &dev->bm;
-       man = &bm->man[bo->mem.mem_type];
-       
-       if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) 
-               iounmap(bo->iomap);
-       
-       bo->iomap = NULL;
-}
-#endif
-
-/**
  * \c Kill all user-space virtual mappings of this buffer object.
  *
  * \param bo The buffer object.
index b4486bf..23e8c0f 100644 (file)
@@ -72,3 +72,180 @@ int drm_bo_move_ttm(drm_device_t *dev,
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
 }
+
+
+/**
+ * \c Return a kernel virtual address to the memory region PCI aperture.
+ *
+ * \param dev The drm device.
+ * \param mem The memory region to map.
+ * \param virtual Set on success to the kernel virtual address, or NULL
+ * if the region contents are not accessible through PCI space.
+ *
+ * Returns -EINVAL if the memory region is currently not mappable.
+ * Returns -ENOMEM if the ioremap operation failed.
+ * Otherwise returns zero.
+ *
+ * After a successful call, unmap with drm_mem_reg_iounmap().
+ */
+
+
+int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual)
+{
+       drm_buffer_manager_t *bm = &dev->bm;
+       drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; 
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       unsigned long bus_base;
+       int ret;
+       void *addr;
+
+       *virtual = NULL;
+       ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
+       if (ret || bus_size == 0) 
+               return ret;
+
+       if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
+               addr = (void *) (((u8 *)man->io_addr) + bus_offset); /* already mapped */
+       else {
+               addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+               if (!addr)
+                       return -ENOMEM;
+       }
+       *virtual = addr;
+       return 0;
+}
+
+
+/**
+ * \c Unmap a mapping obtained using drm_mem_reg_ioremap().
+ *
+ * \param dev The drm device.
+ * \param mem The memory region that was mapped.
+ * \param virtual The kernel virtual address returned by the map call.
+ */
+
+void drm_mem_reg_iounmap(drm_device_t *dev, drm_bo_mem_reg_t *mem,
+                        void *virtual)
+{
+       drm_buffer_manager_t *bm; 
+       drm_mem_type_manager_t *man; 
+
+
+       bm = &dev->bm;
+       man = &bm->man[mem->mem_type];
+       
+       if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) 
+               iounmap(virtual);
+}
+
+
+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+       /* Copy one page between two io-mapped regions, 32 bits at a time. */
+       uint32_t *dstP = (uint32_t *)((unsigned long) dst + (page << PAGE_SHIFT));
+       uint32_t *srcP = (uint32_t *)((unsigned long) src + (page << PAGE_SHIFT));
+       int i;
+       for (i=0; i < PAGE_SIZE / sizeof(uint32_t); ++i) 
+               iowrite32(ioread32(srcP++), dstP++);
+       return 0;
+}
+
+static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) 
+{
+       /* Copy one page from an io-mapped region into ttm page "page". */
+       struct page *d = drm_ttm_get_page(ttm, page);
+       void *dst;
+
+       if (!d)
+               return -ENOMEM;
+
+       src = (void *)((unsigned long) src + (page << PAGE_SHIFT));
+       dst = kmap(d);
+       if (!dst)
+               return -ENOMEM;
+
+       memcpy_fromio(dst, src, PAGE_SIZE);
+       kunmap(d); /* kunmap() takes the struct page, not the kmap() address */
+       return 0;
+}
+
+static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) 
+{
+       /* Copy ttm page "page" into an io-mapped region. */
+       struct page *s = drm_ttm_get_page(ttm, page);
+       void *src;
+
+       if (!s)
+               return -ENOMEM;
+
+       dst = (void *)((unsigned long) dst + (page << PAGE_SHIFT));
+       src = kmap(s);
+       if (!src)
+               return -ENOMEM;
+
+       memcpy_toio(dst, src, PAGE_SIZE);
+       kunmap(s); /* kunmap() takes the struct page, not the kmap() address */
+       return 0;
+}
+
+       
+int drm_bo_move_memcpy(drm_device_t *dev,
+                      drm_ttm_t *ttm, 
+                      int evict,
+                      int no_wait,
+                      drm_bo_mem_reg_t *old_mem,
+                      drm_bo_mem_reg_t *new_mem)
+{
+       void *old_iomap;
+       void *new_iomap;
+       int ret;
+       uint32_t save_flags = old_mem->flags;
+       uint32_t save_mask = old_mem->mask;
+       drm_bo_mem_reg_t old_copy = *old_mem; /* *old_mem is overwritten on success */
+       unsigned long i;
+       unsigned long page;
+       unsigned long add = 0;
+       int dir;
+
+       ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
+       if (ret)
+               return ret;
+       ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
+       if (ret) 
+               goto out;
+
+       if (old_iomap == NULL && new_iomap == NULL) /* both in system memory */
+               goto out2;
+
+       add = 0;
+       dir = 1;
+
+       if ((old_mem->mem_type == new_mem->mem_type) && 
+           (new_mem->mm_node->start < 
+            old_mem->mm_node->start +  old_mem->mm_node->size)) {
+               dir = -1; /* overlapping move within one aperture: copy backwards */
+               add = new_mem->num_pages - 1;
+       }
+
+       for (i=0; i < new_mem->num_pages; ++i) {
+               page = i*dir + add; 
+               if (old_iomap == NULL) 
+                       ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
+               else if (new_iomap == NULL)
+                       ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
+               else 
+                       ret = drm_copy_io_page(new_iomap, old_iomap, page);
+               if (ret)
+                       goto out1;
+       }
+
+out2:
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+       old_mem->mask = save_mask;
+       DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+out1:
+       drm_mem_reg_iounmap(dev, new_mem, new_iomap);
+out:
+       drm_mem_reg_iounmap(dev, &old_copy, old_iomap); /* not old_mem: overwritten above */
+       return ret;
+}
index 044cf4a..d0bca67 100644 (file)
@@ -220,7 +220,6 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
        unsigned long page_offset;
        struct page *page;
        drm_ttm_t *ttm; 
-       drm_buffer_manager_t *bm;
        drm_device_t *dev;
 
        mutex_lock(&bo->mutex);
@@ -241,20 +240,13 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                goto out_unlock;
        }
 
-       bm = &dev->bm;
        ttm = bo->ttm;
        drm_ttm_fixup_caching(ttm);
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
-       page = ttm->pages[page_offset];
-
+       page = drm_ttm_get_page(ttm, page_offset);
        if (!page) {
-               page = drm_ttm_alloc_page();
-               if (!page) {
-                       page = NOPAGE_OOM;
-                       goto out_unlock;
-               }
-               ttm->pages[page_offset] = page;
-               ++bm->cur_pages;                
+               page = NOPAGE_OOM;
+               goto out_unlock;
        }
 
        get_page(page);
index 3428229..5c270be 100644 (file)
@@ -80,7 +80,7 @@ static void ttm_free_pages(drm_ttm_t *ttm)
 }
 
 
-struct page *drm_ttm_alloc_page(void)
+static struct page *drm_ttm_alloc_page(void)
 {
        struct page *page;
 
@@ -192,27 +192,37 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
        return 0;
 }
 
+struct page *drm_ttm_get_page(drm_ttm_t *ttm, int index)
+{
+       struct page *p;
+       drm_buffer_manager_t *bm = &ttm->dev->bm;
+       /* Return the ttm page at "index", allocating it on first use. */
+       p = ttm->pages[index];
+       if (!p) {
+               p = drm_ttm_alloc_page();
+               if (!p)
+                       return NULL;
+               ttm->pages[index] = p;
+               ++bm->cur_pages; /* account the new page to the buffer manager */
+       }
+       return p;
+}
+
+
 static int drm_ttm_populate(drm_ttm_t * ttm)
 {
        struct page *page;
        unsigned long i;
-       drm_buffer_manager_t *bm;
        drm_ttm_backend_t *be;
 
        if (ttm->state != ttm_unpopulated)
                return 0;
 
-       bm = &ttm->dev->bm;
        be = ttm->be;
        for (i = 0; i < ttm->num_pages; ++i) {
-               page = ttm->pages[i];
-               if (!page) {
-                       page = drm_ttm_alloc_page();
-                       if (!page)
-                               return -ENOMEM;
-                       ttm->pages[i] = page;
-                       ++bm->cur_pages;
-               }
+               page = drm_ttm_get_page(ttm, i);
+               if (!page)
+                       return -ENOMEM;
        }
        be->populate(be, ttm->num_pages, ttm->pages);
        ttm->state = ttm_unbound;
index 6f62712..37003c4 100644 (file)
@@ -82,11 +82,11 @@ typedef struct drm_ttm {
 
 
 extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
-extern struct page *drm_ttm_alloc_page(void);
 extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
 extern void drm_ttm_unbind(drm_ttm_t * ttm);
 extern void drm_ttm_evict(drm_ttm_t * ttm);
 extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
+extern struct page *drm_ttm_get_page(drm_ttm_t *ttm, int index);
 
 /*
  * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, 
index 416ac4a..25779ec 100644 (file)
@@ -736,7 +736,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
        unsigned long page_offset;
        struct page *page = NULL;
        drm_ttm_t *ttm; 
-       drm_buffer_manager_t *bm;
        drm_device_t *dev;
        unsigned long pfn;
        int err;
@@ -768,19 +767,13 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
                pgprot = drm_io_prot(_DRM_AGP, vma);
        } else {
-               bm = &dev->bm;
                ttm = bo->ttm;
 
                drm_ttm_fixup_caching(ttm);
-               page = ttm->pages[page_offset];
+               page = drm_ttm_get_page(ttm, page_offset);
                if (!page) {
-                       page = drm_ttm_alloc_page();
-                       if (!page) {
-                               data->type = VM_FAULT_OOM;
-                               goto out_unlock;
-                       }
-                       ttm->pages[page_offset] = page;
-                       ++bm->cur_pages;
+                       data->type = VM_FAULT_OOM;
+                       goto out_unlock;
                }
                pfn = page_to_pfn(page);
                pgprot = vma->vm_page_prot;