/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
int drm_map_page_into_agp(struct page *page)
{
	int i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
	int i = change_page_attr(page, 1, PAGE_KERNEL);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
#endif
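
/*
 * Usage sketch (illustrative only, not part of this file): since the TLB
 * flush is left to the caller, attribute changes can be batched and then
 * flushed once, e.g.
 *
 *	for (i = 0; i < num_pages; ++i)
 *		drm_map_page_into_agp(page_list[i]);
 *	global_flush_tlb();
 *
 * num_pages and page_list[] are hypothetical caller-side variables.
 */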
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
	static pgprot_t drm_protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};
	return drm_protection_map[vm_flags & 0x0F];
#else
	extern pgprot_t protection_map[];
	return protection_map[vm_flags & 0x0F];
#endif
}
#endif
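
/*
 * Usage sketch (illustrative only): the low four vm_flags bits
 * (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED) index the protection map,
 * so a driver can derive page protections for a VMA with e.g.
 *
 *	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 */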

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static int drm_pte_is_clear(struct vm_area_struct *vma,
			    unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 1;
	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	spin_lock(&mm->page_table_lock);
#else
	spinlock_t *ptl;
#endif
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd)) goto unlock;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) goto unlock;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) goto unlock;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	pte = pte_offset_map(pmd, addr);
#else
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
#endif
	ret = pte_none(*pte);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	pte_unmap(pte);
unlock:
	spin_unlock(&mm->page_table_lock);
#else
	pte_unmap_unlock(pte, ptl);
unlock:
#endif
	return ret;
}
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
		  unsigned long pfn, pgprot_t pgprot)
{
	if (!drm_pte_is_clear(vma, addr))
		return -EBUSY;
	return io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
}
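
/*
 * Usage sketch (illustrative only): a fault handler that has resolved the
 * faulting address to a page frame number can insert the mapping with
 *
 *	err = vm_insert_pfn(vma, address, pfn, vma->vm_page_prot);
 *
 * where -EBUSY indicates the PTE was already populated (e.g. by a racing
 * fault); address and pfn are hypothetical handler-side variables.
 */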
static struct {
	spinlock_t lock;
	struct page *dummy_page;
	atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};

struct page *get_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 0) {
		struct page *page = alloc_page(GFP_KERNEL);
		if (!page)
			return NOPAGE_OOM;
		spin_lock(&drm_np_retry.lock);
		drm_np_retry.dummy_page = page;
		atomic_set(&drm_np_retry.present, 1);
		spin_unlock(&drm_np_retry.lock);
	}
	get_page(drm_np_retry.dummy_page);
	return drm_np_retry.dummy_page;
}
void free_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 1) {
		spin_lock(&drm_np_retry.lock);
		__free_page(drm_np_retry.dummy_page);
		drm_np_retry.dummy_page = NULL;
		atomic_set(&drm_np_retry.present, 0);
		spin_unlock(&drm_np_retry.lock);
	}
}
#endif
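
/*
 * Usage sketch (illustrative only): on kernels that lack NOPAGE_REFAULT,
 * a nopage handler can hand back the shared dummy page to force the
 * faulting process to retry, e.g.
 *
 *	return get_nopage_retry();
 *
 * and the driver releases the dummy page once (typically at module unload)
 * with free_nopage_retry(). The companion compat header is assumed to map
 * NOPAGE_REFAULT onto get_nopage_retry() on such kernels.
 */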

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
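
/*
 * Compatibility wrapper: pre-2.6.15 kernels use the "nopage" vma callback,
 * so translate it into a call to the fault-style handler drm_vm_ttm_fault()
 * and convert its result back into nopage return codes.
 */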
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
			       unsigned long address, int *type)
{
	struct fault_data data;

	if (type)
		*type = VM_FAULT_MINOR;
	data.address = address;
	data.vma = vma;
	drm_vm_ttm_fault(vma, &data);
	switch (data.type) {
	case VM_FAULT_OOM:
		return NOPAGE_OOM;
	case VM_FAULT_SIGBUS:
		return NOPAGE_SIGBUS;
	default:
		return NOPAGE_REFAULT;
	}
}
#endif