1 /**************************************************************************
3 * This kernel module is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License as
5 * published by the Free Software Foundation; either version 2 of the
6 * License, or (at your option) any later version.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 **************************************************************************/
19 * This code provides access to unexported mm kernel features. It is necessary
20 * to use the new DRM memory manager code with kernels that don't support it
23 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
24 * Linux kernel mm subsystem authors.
25 * (Most code taken from there).
29 #include <asm/pgtable.h>
30 #include <asm/cacheflush.h>
31 #include <asm/tlbflush.h>
/*
 * pgd_clear_bad() - report and reset a corrupt top-level (PGD) page-table
 * entry. Presumably a copy of the unexported kernel helper of the same name
 * (per the file header, "Most code taken from there"); the body is elided in
 * this view — TODO confirm against the matching kernel version's mm code.
 */
34 void pgd_clear_bad(pgd_t * pgd)
/*
 * pud_clear_bad() - report and reset a corrupt PUD (second-level) page-table
 * entry. Mirror of the unexported kernel helper; body elided in this view —
 * TODO confirm against the kernel's mm/memory.c equivalent.
 */
40 void pud_clear_bad(pud_t * pud)
/*
 * pmd_clear_bad() - report and reset a corrupt PMD (third-level) page-table
 * entry. Mirror of the unexported kernel helper; body elided in this view —
 * TODO confirm against the kernel's mm/memory.c equivalent.
 */
46 void pmd_clear_bad(pmd_t * pmd)
/*
 * change_pte_range() - walk the PTEs covering [addr, end) under @pmd and,
 * for each present entry, clear it and drop the page's mapcount/rss
 * accounting. Several lines (the do-loop opener, local declarations, the
 * pfn extraction, pte_unmap, and the closing #else/#endif arms of the
 * version conditionals) are elided from this view — do not assume the
 * visible lines are contiguous.
 */
53 static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd,
54 unsigned long addr, unsigned long end)
/* Map the PTE page for @addr; the matching pte_unmap() is elided here. */
60 pte = pte_offset_map(pmd, addr);
/* Only present entries carry a mapped page that needs tearing down. */
62 if (pte_present(*pte)) {
/* Atomically clear the PTE (fetches and zeroes it in one step). */
66 ptep_get_and_clear(mm, addr, pte);
/* NOTE(review): pfn is assigned from the cleared PTE in an elided line. */
68 page = pfn_to_page(pfn);
/* _mapcount going negative means this was the page's last mapping. */
69 if (atomic_add_negative(-1, &page->_mapcount)) {
/* Propagate the s390-style storage-key dirty bit before unaccounting. */
70 if (page_test_and_clear_dirty(page))
/* Kernel-version split: the NR_FILE_MAPPED zone counter replaced the
 * older global nr_mapped page state in 2.6.18. */
72 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
73 dec_zone_page_state(page, NR_FILE_MAPPED);
/* Pre-2.6.18 branch (the intervening #else is elided from this view). */
75 dec_page_state(nr_mapped);
/* Per-mm RSS accounting changed name/shape across 2.6.12 and 2.6.15. */
80 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
81 dec_mm_counter(mm, file_rss);
82 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
83 dec_mm_counter(mm, rss);
/* Advance one PTE / one page per iteration until the range is done. */
89 } while (pte++, addr += PAGE_SIZE, addr != end);
/*
 * change_pmd_range() - iterate the PMD entries covering [addr, end) under
 * @pud, descending into change_pte_range() for each populated PMD. Local
 * declarations and the do-loop opener are elided from this view.
 */
93 static inline void change_pmd_range(struct mm_struct *mm, pud_t * pud,
94 unsigned long addr, unsigned long end)
99 pmd = pmd_offset(pud, addr);
/* Clamp this step to the end of the current PMD's coverage or @end. */
101 next = pmd_addr_end(addr, end);
/* Skip empty PMDs; clear (and report) corrupt ones instead of descending. */
102 if (pmd_none_or_clear_bad(pmd))
104 change_pte_range(mm, pmd, addr, next);
105 } while (pmd++, addr = next, addr != end);
/*
 * change_pud_range() - iterate the PUD entries covering [addr, end) under
 * @pgd, descending into change_pmd_range() for each populated PUD. Local
 * declarations and the do-loop opener are elided from this view.
 */
108 static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd,
109 unsigned long addr, unsigned long end)
114 pud = pud_offset(pgd, addr);
/* Clamp this step to the end of the current PUD's coverage or @end. */
116 next = pud_addr_end(addr, end);
/* Skip empty PUDs; clear (and report) corrupt ones instead of descending. */
117 if (pud_none_or_clear_bad(pud))
119 change_pmd_range(mm, pud, addr, next);
120 } while (pud++, addr = next, addr != end);
/*
 * drm_clear_vma() - clear all page-table mappings for [addr, end) in @vma,
 * walking PGD -> PUD -> PMD -> PTE and flushing caches before and the TLB
 * after. Substitute for the unexported kernel range-clearing code on older
 * kernels. Local declarations, the do-loop opener, and the #else/#endif
 * arms of the conditionals are elided from this view.
 */
124 * This function should be called with all relevant spinlocks held.
127 void drm_clear_vma(struct vm_area_struct *vma,
128 unsigned long addr, unsigned long end)
130 struct mm_struct *mm = vma->vm_mm;
/* flush_tlb_range is only usable in-module when flush_tlb_mm is a macro
 * (or when built into the kernel); otherwise the flush path is compiled
 * out — presumably with a fallback in the elided #else arm. */
133 #if defined(flush_tlb_mm) || !defined(MODULE)
/* Remember the range start; addr is advanced by the walk below. */
134 unsigned long start = addr;
137 pgd = pgd_offset(mm, addr);
/* Write back CPU caches for the range before the mappings go away. */
138 flush_cache_range(vma, addr, end);
140 next = pgd_addr_end(addr, end);
141 if (pgd_none_or_clear_bad(pgd))
143 change_pud_range(mm, pgd, addr, next);
144 } while (pgd++, addr = next, addr != end);
145 #if defined(flush_tlb_mm) || !defined(MODULE)
/* NOTE(review): addr == end here; presumably the elided code flushes
 * [start, end) — confirm this call uses `start`, not `addr`, upstream. */
146 flush_tlb_range(vma, addr, end);
/*
 * vm_get_page_prot() - translate VM_READ/VM_WRITE/VM_EXEC/VM_SHARED flag
 * bits (the low 4 bits of @vm_flags) into page protection bits. Provides
 * the helper for kernels that do not export it: one build path uses a
 * private copy of the protection table, the other indexes the kernel's own
 * protection_map[]. The #if/#else/#endif selecting between the two paths
 * is elided from this view.
 */
150 pgprot_t vm_get_page_prot(unsigned long vm_flags)
/* Private copy of the kernel's protection table: __P* = private (COW)
 * mappings, __S* = shared mappings, indexed by the four low flag bits. */
153 static pgprot_t drm_protection_map[16] = {
154 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
155 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
158 return drm_protection_map[vm_flags & 0x0F];
/* Alternate build path (elided #else above): use the kernel's table. */
160 extern pgprot_t protection_map[];
161 return protection_map[vm_flags & 0x0F];