}
#endif
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+int drm_map_page_into_agp(struct page *page)
+{
+	/*
+	 * It is the caller's responsibility to call global_flush_tlb()
+	 * afterwards; deferring the flush lets a whole batch of page
+	 * attribute changes share a single flush.
+	 */
+	return change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+}
+
+int drm_unmap_page_from_agp(struct page *page)
+{
+	/* Same deferred-flush contract as drm_map_page_into_agp(). */
+	return change_page_attr(page, 1, PAGE_KERNEL);
+}
+#endif
+
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
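The wrappers above deliberately skip the per-page TLB flush that the stock pre-2.6.15 helpers perform, so a caller is expected to batch its changes and flush once. A minimal sketch of that calling pattern; remap_batch() is a hypothetical helper, not part of this patch:

#include <linux/mm.h>		/* struct page */
#include <asm/cacheflush.h>	/* change_page_attr(), global_flush_tlb() */

/* Remap a batch of pages uncached, then flush the TLB once at the end. */
static int remap_batch(struct page **pages, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; ++i) {
		ret = drm_map_page_into_agp(pages[i]);
		if (ret)
			break;
	}
	global_flush_tlb();	/* one flush covers all remapped pages */
	return ret;
}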
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _DRM_COMPAT_H_
#define _DRM_COMPAT_H_
+
+#include <asm/agp.h>
extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
+/*
+ * These are similar to the current kernel gatt-page allocator, except that
+ * we want a struct page pointer instead of a virtual address. This allows
+ * the interface to also handle pages that are not in the kernel linear map.
+ * A NULL return from alloc_gatt_pages() is preserved so that callers can
+ * detect allocation failure.
+ */
+
+#define drm_alloc_gatt_pages(order) ({			\
+	void *_virt = alloc_gatt_pages(order);		\
+	_virt ? virt_to_page(_virt) : NULL; })
+#define drm_free_gatt_pages(pages, order) \
+	free_gatt_pages(page_address(pages), order)
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * In kernels before 2.6.15, the built-in map_page_into_agp() and
+ * unmap_page_from_agp() do a global TLB flush for every page, which is
+ * too slow when many pages change at once. These replacements defer the
+ * flush to the caller.
+ */
+
+extern int drm_unmap_page_from_agp(struct page *page);
+extern int drm_map_page_into_agp(struct page *page);
+
+#define map_page_into_agp drm_map_page_into_agp
+#define unmap_page_from_agp drm_unmap_page_from_agp
+#endif
+
#endif
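Because the allocator wrappers traffic in struct page pointers and keep NULL on failure, per-page allocation loops can use ordinary NULL checks and unwind cleanly. A sketch under those assumptions; the helper below is hypothetical, not part of this patch:

#include <linux/errno.h>	/* ENOMEM */
#include <linux/mm.h>		/* struct page */

/* Allocate count single gatt pages into pages[], unwinding on failure. */
static int alloc_gatt_page_array(struct page **pages, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		pages[i] = drm_alloc_gatt_pages(0);
		if (!pages[i]) {
			while (--i >= 0)
				drm_free_gatt_pages(pages[i], 0);
			return -ENOMEM;
		}
	}
	return 0;
}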
		if (ret)
			break;
	}
-	global_flush_tlb();
	return ret;
}
			   unsigned long num_pages)
{
	struct list_head *list;
-	struct page **first_page = ttm->pages + page_offset;
-	struct page **last_page = ttm->pages + (page_offset + num_pages);
-	struct page **cur_page;
+
#if !defined(flush_tlb_mm) && defined(MODULE)
	int flush_tlb = 0;
#endif
		global_flush_tlb();
#endif
-	for (cur_page = first_page; cur_page != last_page; ++cur_page) {
-		if (page_mapped(*cur_page)) {
-			DRM_ERROR("Mapped page detected. Map count is %d\n",
-				  page_mapcount(*cur_page));
-			return -1;
-		}
-	}
	return 0;
}
			if (ttm->page_flags &&
			    (ttm->page_flags[i] & DRM_TTM_PAGE_UNCACHED) &&
			    *cur_page && !PageHighMem(*cur_page)) {
-				change_page_attr(*cur_page, 1, PAGE_KERNEL);
+				unmap_page_from_agp(*cur_page);
				do_tlbflush = 1;
			}
			if (*cur_page) {
				 * End debugging.
				 */
-				__free_page(*cur_page);
+				drm_free_gatt_pages(*cur_page, 0);
				--bm->cur_pages;
			}
		}
		if (do_tlbflush)
-			global_flush_tlb();
+			flush_agp_mappings();
		ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
			 DRM_MEM_TTM);
		ttm->pages = NULL;
	}
	if (ttm->page_flags) {
-		ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags), DRM_MEM_TTM);
+		ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags),
+			 DRM_MEM_TTM);
		ttm->page_flags = NULL;
	}
{
	int i, cur;
	struct page **cur_page;
-	pgprot_t attr = (noncached) ? PAGE_KERNEL_NOCACHE : PAGE_KERNEL;
	for (i = 0; i < num_pages; ++i) {
		cur = page_offset + i;
				    DRM_TTM_PAGE_UNCACHED) != noncached) {
				DRM_MASK_VAL(ttm->page_flags[cur],
					     DRM_TTM_PAGE_UNCACHED, noncached);
-				change_page_attr(*cur_page, 1, attr);
+				if (noncached)
+					map_page_into_agp(*cur_page);
+				else
+					unmap_page_from_agp(*cur_page);
			}
		}
	}
	if (do_tlbflush)
-		global_flush_tlb();
+		flush_agp_mappings();
	return 0;
}
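The caching toggle above reduces to a map/unmap-per-page, flush-once pattern. A condensed, hypothetical rendering of that pattern (not the patch's actual function; it assumes a flat page array rather than a ttm):

#include <linux/mm.h>	/* struct page, PageHighMem() */
#include <asm/agp.h>	/* map_page_into_agp(), flush_agp_mappings() */

/* Toggle caching on an array of pages, flushing mappings once at the end. */
static void set_pages_caching(struct page **pages, int n, int noncached)
{
	int i, do_tlbflush = 0;

	for (i = 0; i < n; ++i) {
		if (!pages[i] || PageHighMem(pages[i]))
			continue;
		if (noncached)
			map_page_into_agp(pages[i]);
		else
			unmap_page_from_agp(pages[i]);
		do_tlbflush = 1;
	}
	if (do_tlbflush)
		flush_agp_mappings();	/* single flush for the whole batch */
}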
				drm_destroy_ttm_region(entry);
				return -ENOMEM;
			}
-			*cur_page = alloc_page(GFP_KERNEL);
+			*cur_page = drm_alloc_gatt_pages(0);
			if (!*cur_page) {
				DRM_ERROR("Page allocation failed\n");
				drm_destroy_ttm_region(entry);