1 /**************************************************************************
3 * This kernel module is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License as
5 * published by the Free Software Foundation; either version 2 of the
6 * License, or (at your option) any later version.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 **************************************************************************/
19 * This code provides access to unexported mm kernel features. It is necessary
20 * to use the new DRM memory manager code with kernels that don't support it
23 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
24 * Linux kernel mm subsystem authors.
25 * (Most code taken from there).
29 #include <asm/pgtable.h>
30 #include <asm/cacheflush.h>
31 #include <asm/tlbflush.h>
/* Compat copy of the kernel's pgd_clear_bad(): invoked via
 * pgd_none_or_clear_bad() in drm_clear_vma() below when a PGD entry is
 * found corrupt.  Body not visible in this chunk — presumably reports
 * the bad entry and clears it, as in mm/memory.c.  TODO confirm. */
34 void pgd_clear_bad(pgd_t * pgd)
/* Compat copy of the kernel's pud_clear_bad(): invoked via
 * pud_none_or_clear_bad() in change_pmd_range() below for corrupt PUD
 * entries.  Body not visible in this chunk — presumably reports and
 * clears the entry, mirroring mm/memory.c.  TODO confirm. */
40 void pud_clear_bad(pud_t * pud)
/* Compat copy of the kernel's pmd_clear_bad(): invoked via
 * pmd_none_or_clear_bad() in change_pud_range() below for corrupt PMD
 * entries.  Body not visible in this chunk — presumably reports and
 * clears the entry, mirroring mm/memory.c.  TODO confirm. */
46 void pmd_clear_bad(pmd_t * pmd)
/*
 * Lowest level of the clearing walk: visit every PTE under @pmd in
 * [addr, end) and clear each one that is present.  Caller must hold
 * the relevant page-table locks (see the note on drm_clear_vma).
 * NOTE(review): several body lines (locals, do/pte_unmap, braces) are
 * elided from this extraction; `ptent` is declared/assigned on one of
 * the missing lines.
 */
53 static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd,
54 unsigned long addr, unsigned long end)
58 pte = pte_offset_map(pmd, addr); /* map the PTE page for the first address */
60 if (pte_present(*pte)) { /* only touch entries that are actually populated */
63 ptep_get_and_clear(mm, addr, pte); /* atomically fetch and clear the PTE */
64 lazy_mmu_prot_update(ptent);
66 } while (pte++, addr += PAGE_SIZE, addr != end); /* one PTE per page */
/*
 * Middle level of the clearing walk: iterate the PMD entries under
 * @pud for [addr, end), descending into change_pte_range() for each
 * populated PMD.  pmd_none_or_clear_bad() skips empty entries and
 * clears corrupt ones (via pmd_clear_bad above).
 * NOTE(review): opening brace, locals and `do {` are elided from this
 * extraction.
 */
70 static inline void change_pmd_range(struct mm_struct *mm, pud_t * pud,
71 unsigned long addr, unsigned long end)
76 pmd = pmd_offset(pud, addr);
78 next = pmd_addr_end(addr, end); /* clamp this step to the PMD boundary */
79 if (pmd_none_or_clear_bad(pmd))
81 change_pte_range(mm, pmd, addr, next);
82 } while (pmd++, addr = next, addr != end);
/*
 * Upper-middle level of the clearing walk: iterate the PUD entries
 * under @pgd for [addr, end), descending into change_pmd_range() for
 * each populated PUD.  pud_none_or_clear_bad() skips empty entries and
 * clears corrupt ones (via pud_clear_bad above).
 * NOTE(review): opening brace, locals and `do {` are elided from this
 * extraction.
 */
85 static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd,
86 unsigned long addr, unsigned long end)
91 pud = pud_offset(pgd, addr);
93 next = pud_addr_end(addr, end); /* clamp this step to the PUD boundary */
94 if (pud_none_or_clear_bad(pud))
96 change_pmd_range(mm, pud, addr, next);
97 } while (pud++, addr = next, addr != end);
101 * This function should be called with all relevant spinlocks held.
104 void drm_clear_vma(struct vm_area_struct *vma,
105 unsigned long addr, unsigned long end)
107 struct mm_struct *mm = vma->vm_mm;
110 #if defined(flush_tlb_mm) || !defined(MODULE)
111 unsigned long start = addr;
114 pgd = pgd_offset(mm, addr);
115 flush_cache_range(vma, addr, end);
117 next = pgd_addr_end(addr, end);
118 if (pgd_none_or_clear_bad(pgd))
120 change_pud_range(mm, pgd, addr, next);
121 } while (pgd++, addr = next, addr != end);
122 #if defined(flush_tlb_mm) || !defined(MODULE)
123 flush_tlb_range(vma, addr, end);
/*
 * vm_get_page_prot - compat translation from the low four vm_flags
 * bits (read/write/exec/shared selector, masked with 0x0F) to page
 * protection bits, using a private copy of the kernel's 16-entry
 * protection table.
 * NOTE(review): the two return paths below are presumably separated by
 * an elided #ifdef (module build uses the local table, in-kernel build
 * uses the exported protection_map) — confirm against the full file.
 */
127 pgprot_t vm_get_page_prot(unsigned long vm_flags)
/* Private copy: 8 private (__P*) then 8 shared (__S*) protection entries. */
130 static pgprot_t drm_protection_map[16] = {
131 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
132 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
135 return drm_protection_map[vm_flags & 0x0F];
/* Alternative path: index the kernel's own exported table instead. */
137 extern pgprot_t protection_map[];
138 return protection_map[vm_flags & 0x0F];