/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is
 * necessary to use the new DRM memory manager code with kernels that
 * don't support it directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
int drm_map_page_into_agp(struct page *page)
{
	int i;

	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
	int i;

	i = change_page_attr(page, 1, PAGE_KERNEL);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
#endif
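
/*
 * A typical caller batches attribute changes and flushes once at the end.
 * Illustrative sketch only (drm_agp_flip_pages() is a made-up name, not a
 * function in this tree):
 *
 *	static void drm_agp_flip_pages(struct page **pages, int npages)
 *	{
 *		int i;
 *
 *		for (i = 0; i < npages; ++i)
 *			drm_map_page_into_agp(pages[i]);
 *		global_flush_tlb();
 *	}
 */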

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was not exported before 2.6.19, so modules need a
 * private copy of the table.
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
	static pgprot_t drm_protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};

	return drm_protection_map[vm_flags & 0x0F];
#else
	extern pgprot_t protection_map[];
	return protection_map[vm_flags & 0x0F];
#endif
}
#endif
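
/*
 * The 16 entries are indexed by the low four vm_flags bits: VM_READ (0x1),
 * VM_WRITE (0x2), VM_EXEC (0x4) and VM_SHARED (0x8). A shared read/write
 * mapping, for instance, looks up entry 0xB, which is __S011.
 */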

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * vm code for kernels below 2.6.15, in which a major vm rewrite occurred.
 * This implements a simple, straightforward version similar to the one
 * that appears in 2.6.19+. Kernels below 2.6.15 use nopage() whereas
 * 2.6.19 and upwards use nopfn().
 */

static int drm_pte_is_clear(struct vm_area_struct *vma,
			    unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 1;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	spin_lock(&mm->page_table_lock);
#else
	spinlock_t *ptl;
#endif

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto unlock;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		goto unlock;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		goto unlock;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	pte = pte_offset_map(pmd, addr);
#else
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
#endif
	ret = pte_none(*pte);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	pte_unmap(pte);
unlock:
	spin_unlock(&mm->page_table_lock);
#else
	pte_unmap_unlock(pte, ptl);
unlock:
#endif
	return ret;
}
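
/*
 * The walk above descends all four page-table levels (pgd, pud, pmd, pte)
 * by hand, since these kernels export no helper for asking "is this
 * address unmapped?"; a missing entry at any intermediate level means the
 * pte is clear.
 */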

int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
		  unsigned long pfn, pgprot_t pgprot)
{
	int ret;

	if (!drm_pte_is_clear(vma, addr))
		return -EBUSY;

	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
	return ret;
}
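
/*
 * A fault handler uses this to install the backing pfn directly and then
 * merely reports success. Illustrative sketch only (the real call site,
 * drm_vm_ttm_fault(), lives elsewhere in the DRM tree):
 *
 *	err = vm_insert_pfn(vma, address, pfn, vma->vm_page_prot);
 *
 * A -EBUSY return means another thread raced us and the pte is already
 * populated, which a handler can treat as success.
 */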

static struct {
	spinlock_t lock;
	struct page *dummy_page;
	atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
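
/*
 * nopage() handlers on these kernels have no "retry the fault" return
 * value. The workaround used below: keep one refcounted dummy page around
 * and hand it back whenever a refault is wanted. The real pte has already
 * been installed by then, so the core vm finds the pte populated, drops
 * the dummy page, and the faulting access simply proceeds. drm_compat.h
 * is expected to define NOPAGE_REFAULT as get_nopage_retry().
 */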

struct page *get_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 0) {
		struct page *page = alloc_page(GFP_KERNEL);
		if (!page)
			return NOPAGE_OOM;
		spin_lock(&drm_np_retry.lock);
		drm_np_retry.dummy_page = page;
		atomic_set(&drm_np_retry.present, 1);
		spin_unlock(&drm_np_retry.lock);
	}
	get_page(drm_np_retry.dummy_page);
	return drm_np_retry.dummy_page;
}

void free_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 1) {
		spin_lock(&drm_np_retry.lock);
		__free_page(drm_np_retry.dummy_page);
		drm_np_retry.dummy_page = NULL;
		atomic_set(&drm_np_retry.present, 0);
		spin_unlock(&drm_np_retry.lock);
	}
}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int *type)
{
	struct fault_data data;

	if (type)
		*type = VM_FAULT_MINOR;

	data.address = address;
	data.vma = vma;
	drm_vm_ttm_fault(vma, &data);
	switch (data.type) {
	case VM_FAULT_OOM:
		return NOPAGE_OOM;
	case VM_FAULT_SIGBUS:
		return NOPAGE_SIGBUS;
	default:
		break;
	}

	return NOPAGE_REFAULT;
}

#endif
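
/*
 * The wrapper above is only a translation layer: VM_FAULT_OOM and
 * VM_FAULT_SIGBUS map to their NOPAGE_ counterparts, and everything else,
 * including plain success, becomes NOPAGE_REFAULT. That works because
 * drm_vm_ttm_fault() is expected to install the pte itself (via
 * vm_insert_pfn() above), so the core vm only needs to be told to try
 * the access again.
 */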

#ifdef DRM_ODD_MM_COMPAT

typedef struct p_mm_entry {
	struct list_head head;
	struct mm_struct *mm;
	atomic_t refcount;
	int locked;
} p_mm_entry_t;

typedef struct vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
} vma_entry_t;
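
/*
 * Bookkeeping for the 2.6.15 - 2.6.18 bind/unbind workaround: one
 * vma_entry per vma currently mapping the ttm, and one refcounted
 * p_mm_entry per distinct mm_struct among those vmas. Rebinding needs
 * every mmap_sem on the p_mm_list write-locked, but callers take
 * dev->struct_mutex first, so drm_ttm_lock_mm() below only trylocks and
 * returns -EAGAIN on contention, asking the caller to drop everything
 * and retry.
 */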

struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int *type)
{
	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page;
	drm_ttm_t *ttm;
	drm_buffer_manager_t *bm;
	drm_device_t *dev;

	/*
	 * FIXME: Check can't map aperture flag.
	 */

	if (type)
		*type = VM_FAULT_MINOR;

	if (!map)
		return NOPAGE_OOM;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;

	ttm = (drm_ttm_t *) map->offset;
	dev = ttm->dev;
	mutex_lock(&dev->struct_mutex);
	drm_fixup_ttm_caching(ttm);
	BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);

	bm = &dev->bm;
	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	page = ttm->pages[page_offset];

	if (!page) {
		if (drm_alloc_memctl(PAGE_SIZE)) {
			page = NOPAGE_OOM;
			goto out;
		}
		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
		if (!page) {
			drm_free_memctl(PAGE_SIZE);
			page = NOPAGE_OOM;
			goto out;
		}
		++bm->cur_pages;
		SetPageLocked(page);
	}

	get_page(page);
out:
	mutex_unlock(&dev->struct_mutex);
	return page;
}

int drm_ttm_map_bound(struct vm_area_struct *vma)
{
	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
	drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
	int ret = 0;

	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
		unsigned long pfn = ttm->aper_offset +
			(ttm->be->aperture_base >> PAGE_SHIFT);
		pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);

		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
					 vma->vm_end - vma->vm_start,
					 pgprot);
	}

	return ret;
}
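
/*
 * While the ttm is bound and uncached, user mappings go straight to the
 * aperture: the pfn handed to io_remap_pfn_range() above is the backend's
 * aperture base (in pages) plus the ttm's offset within the aperture,
 * with an io protection chosen to match the map type.
 */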

int drm_ttm_add_vma(drm_ttm_t *ttm, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n_entry;
	vma_entry_t *v_entry;
	drm_local_map_t *map = (drm_local_map_t *)
		vma->vm_private_data;
	struct mm_struct *mm = vma->vm_mm;

	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
	if (!v_entry) {
		DRM_ERROR("Allocation of vma pointer entry failed\n");
		return -ENOMEM;
	}
	v_entry->vma = vma;

	map->handle = (void *) v_entry;
	list_add_tail(&v_entry->head, &ttm->vma_list);

	list_for_each_entry(entry, &ttm->p_mm_list, head) {
		if (mm == entry->mm) {
			atomic_inc(&entry->refcount);
			return 0;
		} else if ((unsigned long)mm < (unsigned long)entry->mm)
			break;	/* keep the list sorted by mm address */
	}

	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
	if (!n_entry) {
		DRM_ERROR("Allocation of process mm pointer entry failed\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&n_entry->head);
	n_entry->mm = mm;
	n_entry->locked = 0;
	atomic_set(&n_entry->refcount, 0);
	list_add_tail(&n_entry->head, &entry->head);

	return 0;
}
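
/*
 * drm_ttm_add_vma() and drm_ttm_delete_vma() are presumably hooked into
 * the DRM vma open/close callbacks, so the lists also pick up the vma
 * copies created by fork() in addition to the original mmap().
 */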

void drm_ttm_delete_vma(drm_ttm_t *ttm, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n;
	vma_entry_t *v_entry, *v_n;
	int found = 0;
	struct mm_struct *mm = vma->vm_mm;

	list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
		if (v_entry->vma == vma) {
			found = 1;
			list_del(&v_entry->head);
			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
			break;
		}
	}
	BUG_ON(!found);

	list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
		if (mm == entry->mm) {
			if (atomic_add_negative(-1, &entry->refcount)) {
				list_del(&entry->head);
				BUG_ON(entry->locked);
				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
			}
			return;
		}
	}
	BUG_ON(1);
}

int drm_ttm_lock_mm(drm_ttm_t *ttm)
{
	p_mm_entry_t *entry;
	int lock_ok = 1;

	list_for_each_entry(entry, &ttm->p_mm_list, head) {
		BUG_ON(entry->locked);
		if (!down_write_trylock(&entry->mm->mmap_sem)) {
			lock_ok = 0;
			break;
		}
		entry->locked = 1;
	}

	if (lock_ok)
		return 0;

	list_for_each_entry(entry, &ttm->p_mm_list, head) {
		if (!entry->locked)
			break;
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}

	/*
	 * Possible deadlock. Try again. Our callers should handle this
	 * and restart.
	 */

	return -EAGAIN;
}
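
/*
 * Callers are expected to treat -EAGAIN as "drop everything and retry".
 * Illustrative sketch only, assuming the caller holds dev->struct_mutex:
 *
 *	while (drm_ttm_lock_mm(ttm) == -EAGAIN) {
 *		mutex_unlock(&dev->struct_mutex);
 *		schedule();
 *		mutex_lock(&dev->struct_mutex);
 *	}
 */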

void drm_ttm_unlock_mm(drm_ttm_t *ttm)
{
	p_mm_entry_t *entry;

	list_for_each_entry(entry, &ttm->p_mm_list, head) {
		BUG_ON(!entry->locked);
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}
}

int drm_ttm_remap_bound(drm_ttm_t *ttm)
{
	vma_entry_t *v_entry;
	int ret = 0;

	list_for_each_entry(v_entry, &ttm->vma_list, head) {
		ret = drm_ttm_map_bound(v_entry->vma);
		if (ret)
			break;
	}

	drm_ttm_unlock_mm(ttm);
	return ret;
}

void drm_ttm_finish_unmap(drm_ttm_t *ttm)
{
	vma_entry_t *v_entry;

	if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
		return;

	list_for_each_entry(v_entry, &ttm->vma_list, head) {
		v_entry->vma->vm_flags &= ~VM_PFNMAP;
	}
	drm_ttm_unlock_mm(ttm);
}

#endif