Use dummy_read_page for unpopulated kernel-allocated ttm pages.
author     Keith Packard <keithp@keithp.com>
           Sun, 16 Dec 2007 09:47:51 +0000 (01:47 -0800)
committer  Keith Packard <keithp@keithp.com>
           Fri, 21 Dec 2007 20:16:29 +0000 (12:16 -0800)
Previously, dummy_read_page was used only for read-only user allocations; it
filled in pages that were not present in the user address map (presumably,
pages that had been allocated but never written to).

This patch allows it to be used for read-only ttms allocated from the kernel
as well, so that applications can over-allocate buffers without forcing every
page to be allocated.

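As a rough sketch of the pattern the changed backends adopt below (not part of
the patch; "example_populate" is a hypothetical backend callback used only for
illustration), a populate implementation now receives dummy_read_page and
substitutes it for any page slot that was left unallocated:

static int example_populate(struct drm_ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page)
{
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		struct page *page = pages[i];

		/* A NULL entry means the page was never allocated
		 * (read-only and never written); map the shared dummy
		 * read page in its place instead of allocating one. */
		if (!page)
			page = dummy_read_page;

		/* ... hand "page" to the backend-specific mapping ... */
	}
	return 0;
}
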
linux-core/drm_agpsupport.c
linux-core/drm_objects.h
linux-core/drm_ttm.c
linux-core/nouveau_sgdma.c

diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index e8bfaea..0218701 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -505,12 +505,14 @@ static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
 
 
 static int drm_agp_populate(struct drm_ttm_backend *backend,
-                           unsigned long num_pages, struct page **pages)
+                           unsigned long num_pages, struct page **pages,
+                           struct page *dummy_read_page)
 {
        struct drm_agp_ttm_backend *agp_be =
                container_of(backend, struct drm_agp_ttm_backend, backend);
        struct page **cur_page, **last_page = pages + num_pages;
        DRM_AGP_MEM *mem;
+       int dummy_page_count = 0;
 
        if (drm_alloc_memctl(num_pages * sizeof(void *)))
                return -1;
@@ -528,8 +530,16 @@ static int drm_agp_populate(struct drm_ttm_backend *backend,
 
        DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
        mem->page_count = 0;
-       for (cur_page = pages; cur_page < last_page; ++cur_page)
-               mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
+       for (cur_page = pages; cur_page < last_page; ++cur_page) {
+               struct page *page = *cur_page;
+               if (!page) {
+                       page = dummy_read_page;
+                       ++dummy_page_count;
+               }
+               mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page));
+       }
+       if (dummy_page_count)
+               DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);
        agp_be->mem = mem;
        return 0;
 }
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 375420d..47b6b01 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -263,7 +263,8 @@ struct drm_ttm_backend;
 struct drm_ttm_backend_func {
        int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
        int (*populate) (struct drm_ttm_backend *backend,
-                        unsigned long num_pages, struct page **pages);
+                        unsigned long num_pages, struct page **pages,
+                        struct page *dummy_read_page);
        void (*clear) (struct drm_ttm_backend *backend);
        int (*bind) (struct drm_ttm_backend *backend,
                     struct drm_bo_mem_reg *bo_mem);
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index c1fc13f..a9d8733 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -262,7 +262,6 @@ int drm_ttm_set_user(struct drm_ttm *ttm,
 {
        struct mm_struct *mm = tsk->mm;
        int ret;
-       int i;
        int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;
 
        BUG_ON(num_pages != ttm->num_pages);
@@ -278,11 +277,6 @@ int drm_ttm_set_user(struct drm_ttm *ttm,
                return -ENOMEM;
        }
 
-       for (i = 0; i < num_pages; ++i) {
-               if (ttm->pages[i] == NULL)
-                       ttm->pages[i] = ttm->dummy_read_page;
-       }
-
        return 0;
 }
 
@@ -304,12 +298,14 @@ int drm_ttm_populate(struct drm_ttm *ttm)
                return 0;
 
        be = ttm->be;
-       for (i = 0; i < ttm->num_pages; ++i) {
-               page = drm_ttm_get_page(ttm, i);
-               if (!page)
-                       return -ENOMEM;
+       if (ttm->page_flags & DRM_TTM_PAGE_WRITE) {
+               for (i = 0; i < ttm->num_pages; ++i) {
+                       page = drm_ttm_get_page(ttm, i);
+                       if (!page)
+                               return -ENOMEM;
+               }
        }
-       be->func->populate(be, ttm->num_pages, ttm->pages);
+       be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
        ttm->state = ttm_unbound;
        return 0;
 }
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
index f3bf534..6c61819 100644
--- a/linux-core/nouveau_sgdma.c
+++ b/linux-core/nouveau_sgdma.c
@@ -25,7 +25,7 @@ nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
 
 static int
 nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
-                      struct page **pages)
+                      struct page **pages, struct page *dummy_read_page)
 {
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        int p, d, o;
@@ -41,8 +41,11 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
        nvbe->pages_populated = d = 0;
        for (p = 0; p < num_pages; p++) {
                for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
+                       struct page *page = pages[p];
+                       if (!page)
+                               page = dummy_read_page;
                        nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
-                                                        pages[p], o,
+                                                        page, o,
                                                         NV_CTXDMA_PAGE_SIZE,
                                                         PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(nvbe->pagelist[d])) {
@@ -299,7 +302,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
        }
        dev_priv->gart_info.sg_handle = sgreq.handle;
 
-       if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) {
+       if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) {
                DRM_ERROR("failed populate: %d\n", ret);
                return ret;
        }