Adapt to architecture-specific hooks for GATT pages.
author    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Wed, 27 Sep 2006 07:27:31 +0000 (09:27 +0200)
committer Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Wed, 27 Sep 2006 07:27:31 +0000 (09:27 +0200)
linux-core/drm_agpsupport.c
linux-core/drm_bo.c
linux-core/drm_compat.c
linux-core/drm_compat.h
linux-core/drm_ttm.c
linux-core/drm_vm.c

linux-core/drm_agpsupport.c
index 22987b0..2dd8016 100644
@@ -586,7 +586,7 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
        DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
        mem->page_count = 0;
        for (cur_page = pages; cur_page < last_page; ++cur_page) {
-               mem->memory[mem->page_count++] = page_to_phys(*cur_page);
+               mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
        }
        agp_priv->mem = mem;
        return 0;
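
The hunk above stops feeding a raw CPU-physical address into the AGP memory array and routes it through the phys_to_gart() hook from <asm/agp.h> instead. As a minimal sketch (not part of the commit; the helper name is invented), the pattern for turning a struct page into an address the GART actually sees is:

#include <linux/mm.h>   /* struct page */
#include <asm/io.h>     /* page_to_phys() */
#include <asm/agp.h>    /* phys_to_gart(), an identity macro on most architectures */

/* Hypothetical helper: CPU-physical address of a page, translated into
 * the address space visible to the GART / AGP bridge. */
static unsigned long drm_example_page_to_gart(struct page *page)
{
        return phys_to_gart(page_to_phys(page));
}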
linux-core/drm_bo.c
index f479c81..4f1c417 100644
@@ -1562,7 +1562,7 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
                if (arg.req.tt_p_size) {
                        ret = drm_mm_init(&bm->tt_manager,
                                          arg.req.tt_p_offset,
-                                         arg.req.tt_p_size);
+                                         3000 /*arg.req.tt_p_size*/);
                        bm->has_tt = 1;
                        bm->use_tt = 1;
 
linux-core/drm_compat.c
index e56f660..8dbc636 100644
@@ -160,6 +160,26 @@ void drm_clear_vma(struct vm_area_struct *vma,
 }
 #endif
 
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+int drm_map_page_into_agp(struct page *page)
+{
+        int i;
+        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+        /* Caller's responsibility to call global_flush_tlb() for
+         * performance reasons */
+        return i;
+}
+
+int drm_unmap_page_from_agp(struct page *page)
+{
+        int i;
+        i = change_page_attr(page, 1, PAGE_KERNEL);
+        /* Caller's responsibility to call global_flush_tlb() for
+         * performance reasons */
+        return i;
+}
+#endif
+
 
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
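
These compat hooks change one page at a time and deliberately leave the TLB flush to the caller, so a caller touching many pages can pay for a single flush. A hypothetical caller (not in the commit, only built on kernels where these hooks exist) would look roughly like:

/* Hypothetical batch helper: remap a run of pages as uncached for AGP
 * use, then flush the TLB once for the whole run. */
static void drm_example_uncache_pages(struct page **pages, int count)
{
        int i;

        for (i = 0; i < count; ++i)
                drm_map_page_into_agp(pages[i]);
        global_flush_tlb();     /* one flush instead of one per page */
}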
linux-core/drm_compat.h
index 779a700..cf84a70 100644
@@ -31,6 +31,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <asm/agp.h>
 #ifndef _DRM_COMPAT_H_
 #define _DRM_COMPAT_H_
 
@@ -245,4 +246,26 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
 
 extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
 
+/*
+ * These are similar to the current kernel gatt pages allocator, only that we
+ * want a struct page pointer instead of a virtual address. This allows for pages
+ * that are not in the kernel linear map.
+ */
+
+#define drm_alloc_gatt_pages(order) virt_to_page(alloc_gatt_pages(order))
+#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order) 
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These are too slow in earlier kernels.
+ */
+
+extern int drm_unmap_page_from_agp(struct page *page);
+extern int drm_map_page_into_agp(struct page *page);
+
+#define map_page_into_agp drm_map_page_into_agp
+#define unmap_page_from_agp drm_unmap_page_from_agp
+#endif
+
 #endif
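
A hedged usage sketch of the wrappers above (helper names invented here): alloc_gatt_pages() returns a kernel virtual address, so the macros convert to and from a struct page pointer, which is what the TTM code wants to track. Mirroring how the later hunks use them:

/* Hypothetical example: allocate and free one order-0 page through the
 * architecture's GATT page allocator, tracked as a struct page. */
static struct page *drm_example_get_gatt_page(void)
{
        struct page *page = drm_alloc_gatt_pages(0);

        if (!page)
                return NULL;
        /* ... hand the page to the TTM / AGP backend ... */
        return page;
}

static void drm_example_put_gatt_page(struct page *page)
{
        drm_free_gatt_pages(page, 0);
}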
linux-core/drm_ttm.c
index f72e7d3..6790c88 100644
@@ -170,7 +170,6 @@ static int ioremap_vmas(drm_ttm_t * ttm, unsigned long page_offset,
                if (ret)
                        break;
        }
-       global_flush_tlb();
        return ret;
 }
 
@@ -182,9 +181,7 @@ static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
                           unsigned long num_pages)
 {
        struct list_head *list;
-       struct page **first_page = ttm->pages + page_offset;
-       struct page **last_page = ttm->pages + (page_offset + num_pages);
-       struct page **cur_page;
+
 #if !defined(flush_tlb_mm) && defined(MODULE)
        int flush_tlb = 0;
 #endif
@@ -207,13 +204,6 @@ static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
                global_flush_tlb();
 #endif
 
-       for (cur_page = first_page; cur_page != last_page; ++cur_page) {
-               if (page_mapped(*cur_page)) {
-                       DRM_ERROR("Mapped page detected. Map count is %d\n",
-                                 page_mapcount(*cur_page));
-                       return -1;
-               }
-       }
        return 0;
 }
 
@@ -258,7 +248,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
                        if (ttm->page_flags &&
                            (ttm->page_flags[i] & DRM_TTM_PAGE_UNCACHED) &&
                            *cur_page && !PageHighMem(*cur_page)) {
-                               change_page_attr(*cur_page, 1, PAGE_KERNEL);
+                               unmap_page_from_agp(*cur_page);
                                do_tlbflush = 1;
                        }
                        if (*cur_page) {
@@ -278,19 +268,20 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
                                 * End debugging.
                                 */
 
-                               __free_page(*cur_page);
+                               drm_free_gatt_pages(*cur_page, 0);
                                --bm->cur_pages;
                        }
                }
                if (do_tlbflush)
-                       global_flush_tlb();
+                       flush_agp_mappings();
                ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
                         DRM_MEM_TTM);
                ttm->pages = NULL;
        }
 
        if (ttm->page_flags) {
-               ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags), DRM_MEM_TTM);
+               ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags), 
+                        DRM_MEM_TTM);
                ttm->page_flags = NULL;
        }
 
@@ -455,7 +446,6 @@ static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
 {
        int i, cur;
        struct page **cur_page;
-       pgprot_t attr = (noncached) ? PAGE_KERNEL_NOCACHE : PAGE_KERNEL;
 
        for (i = 0; i < num_pages; ++i) {
                cur = page_offset + i;
@@ -472,12 +462,16 @@ static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
                                    DRM_TTM_PAGE_UNCACHED) != noncached) {
                                DRM_MASK_VAL(ttm->page_flags[cur],
                                             DRM_TTM_PAGE_UNCACHED, noncached);
-                               change_page_attr(*cur_page, 1, attr);
+                               if (noncached) {
+                                       map_page_into_agp(*cur_page);
+                               } else {
+                                       unmap_page_from_agp(*cur_page);
+                               }
                        }
                }
        }
        if (do_tlbflush)
-               global_flush_tlb();
+               flush_agp_mappings();
        return 0;
 }
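
The rewritten drm_set_caching() now defers the per-page caching switch to the arch hooks and flushes with flush_agp_mappings() rather than global_flush_tlb(). Distilled into a standalone sketch (assumed helper name, not in the commit):

/* Hypothetical sketch of the toggle pattern: move pages between cached
 * and uncached state via the <asm/agp.h> hooks, flushing the AGP
 * mappings once at the end. */
static void drm_example_set_pages_caching(struct page **pages, int count,
                                          int noncached)
{
        int i;

        for (i = 0; i < count; ++i) {
                if (noncached)
                        map_page_into_agp(pages[i]);
                else
                        unmap_page_from_agp(pages[i]);
        }
        flush_agp_mappings();
}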
 
@@ -612,7 +606,7 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
                                drm_destroy_ttm_region(entry);
                                return -ENOMEM;
                        }
-                       *cur_page = alloc_page(GFP_KERNEL);
+                       *cur_page = drm_alloc_gatt_pages(0);
                        if (!*cur_page) {
                                DRM_ERROR("Page allocation failed\n");
                                drm_destroy_ttm_region(entry);
linux-core/drm_vm.c
index aed0e04..76d7fb4 100644
@@ -274,8 +274,7 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
                        return NOPAGE_OOM;
                }
                ++bm->cur_pages;
-               page = ttm->pages[page_offset] = 
-                       alloc_page(GFP_KERNEL);
+               page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
        }
        if (!page) 
                return NOPAGE_OOM;