/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These have bad performance in the AGP module for the indicated kernel versions.
 */

int drm_map_page_into_agp(struct page *page)
{
	int i;

	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
int drm_unmap_page_from_agp(struct page *page)
{
	int i;

	i = change_page_attr(page, 1, PAGE_KERNEL);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
#endif
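/*
 * Illustrative sketch (not part of the original file): because the two
 * helpers above leave TLB flushing to the caller, a caller changing many
 * pages would typically batch the attribute changes and flush once, e.g.:
 *
 *	for (i = 0; i < num_pages; ++i)
 *		drm_map_page_into_agp(pages[i]);
 *	global_flush_tlb();
 *
 * The pages/num_pages names are assumptions made for this example.
 */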
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
	static pgprot_t drm_protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};

	return drm_protection_map[vm_flags & 0x0F];
#else
	extern pgprot_t protection_map[];
	return protection_map[vm_flags & 0x0F];
#endif
}
#endif
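/*
 * Illustrative sketch (not part of the original file): mmap setup code is
 * expected to use the compat helper above exactly like the symbol exported
 * by 2.6.19 and later kernels, e.g.:
 *
 *	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 */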
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * vm code for kernels below 2.6.15, in which version a major vm rewrite
 * occurred. This implements a simple, straightforward version similar to
 * what's going to be in kernel 2.6.19+.
 * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use nopfn.
 */

static struct {
	spinlock_t lock;
	struct page *dummy_page;
	atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
struct page *get_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 0) {
		struct page *page = alloc_page(GFP_KERNEL);
		if (!page)
			return NOPAGE_OOM;
		spin_lock(&drm_np_retry.lock);
		drm_np_retry.dummy_page = page;
		atomic_set(&drm_np_retry.present, 1);
		spin_unlock(&drm_np_retry.lock);
	}
	get_page(drm_np_retry.dummy_page);
	return drm_np_retry.dummy_page;
}
void free_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 1) {
		spin_lock(&drm_np_retry.lock);
		__free_page(drm_np_retry.dummy_page);
		drm_np_retry.dummy_page = NULL;
		atomic_set(&drm_np_retry.present, 0);
		spin_unlock(&drm_np_retry.lock);
	}
}
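/*
 * Illustrative sketch (not part of the original file): the dummy page above
 * lets a nopage handler return a valid, refcounted page while forcing the
 * kernel to retry the fault, roughly:
 *
 *	return get_nopage_retry();
 *
 * with free_nopage_retry() called once at module unload. NOPAGE_REFAULT,
 * used below, is assumed to be a drm_compat.h wrapper around this helper.
 */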
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address,
			      int *type)
{
	struct fault_data data;

	if (type)
		*type = VM_FAULT_MINOR;
	data.address = address;

	drm_bo_vm_fault(vma, &data);
	switch (data.type) {
	case VM_FAULT_OOM:
		return NOPAGE_OOM;
	case VM_FAULT_SIGBUS:
		return NOPAGE_SIGBUS;
	default:
		break;
	}
	return NOPAGE_REFAULT;
}
#endif
#if !defined(DRM_FULL_MM_COMPAT) && \
  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))

static int drm_pte_is_clear(struct vm_area_struct *vma,
			    unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 1;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd)) goto unlock;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) goto unlock;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) goto unlock;
	pte = pte_offset_map(pmd, addr);
	if (!pte) goto unlock;
	ret = pte_none(*pte);
	pte_unmap(pte);
unlock:
	spin_unlock(&mm->page_table_lock);
	return ret;
}
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
		  unsigned long pfn)
{
	int ret;

	if (!drm_pte_is_clear(vma, addr))
		return -EBUSY;

	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
	return ret;
}
#endif
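/*
 * Illustrative sketch (not part of the original file): with the compat
 * vm_insert_pfn() above, a fault handler that knows the bus address of the
 * faulted page can install the mapping itself, roughly:
 *
 *	unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
 *	err = vm_insert_pfn(vma, address, pfn);
 *
 * where bus_base/bus_offset would come from drm_bo_pci_offset() and
 * err == -EBUSY simply means the PTE was already populated.
 */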
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))

/*
 * While waiting for the fault() handler to appear in the mainline kernel,
 * we accomplish approximately the same thing by wrapping it with nopfn.
 */

unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
			      unsigned long address)
{
	struct fault_data data;

	data.address = address;
	(void) drm_bo_vm_fault(vma, &data);
	if (data.type == VM_FAULT_OOM)
		return NOPFN_OOM;
	else if (data.type == VM_FAULT_SIGBUS)
		return NOPFN_SIGBUS;

	/*
	 * pfn already set by drm_bo_vm_fault().
	 */
	return 0;
}
#endif
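/*
 * Illustrative sketch (not part of the original file): the compat entry
 * points above are assumed to be plugged into the vm_operations_struct for
 * buffer-object mappings depending on kernel version, roughly:
 *
 *	static struct vm_operations_struct drm_bo_vm_ops = {
 *	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
 *		.nopfn = drm_bo_vm_nopfn,
 *	#else
 *		.nopage = drm_bo_vm_nopage,
 *	#endif
 *		.open = drm_bo_vm_open,
 *		.close = drm_bo_vm_close,
 *	};
 *
 * drm_bo_vm_ops, drm_bo_vm_open and drm_bo_vm_close are assumed names; the
 * real wiring lives outside this file.
 */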
#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
 * is to first take the dev->struct_mutex and then trylock all mmap_sems. If
 * this fails for a single mmap_sem, we have to release all sems and the
 * dev->struct_mutex, release the cpu and retry. We also need to keep track of
 * all vmas mapping the ttm.
 */
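/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * all mmap_sems is expected to retry drm_bo_lock_kmm() below whenever the
 * trylock scheme fails, roughly:
 *
 *	do {
 *		mutex_lock(&dev->struct_mutex);
 *		ret = drm_bo_lock_kmm(bo);
 *		if (ret == -EAGAIN) {
 *			mutex_unlock(&dev->struct_mutex);
 *			schedule();
 *		}
 *	} while (ret == -EAGAIN);
 *
 * The dev/bo/ret names are assumptions for the example; the real retry loops
 * live in the buffer-object code that calls these helpers.
 */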
typedef struct p_mm_entry {
	struct list_head head;
	struct mm_struct *mm;
	atomic_t refcount;
	int locked;
} p_mm_entry_t;

typedef struct vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
} vma_entry_t;
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address,
			      int *type)
{
	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page;
	drm_ttm_t *ttm;
	drm_device_t *dev;

	mutex_lock(&bo->mutex);
	if (type)
		*type = VM_FAULT_MINOR;

	if (address > vma->vm_end) {
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	dev = bo->dev;
	if (drm_mem_reg_is_pci(dev, &bo->mem)) {
		DRM_ERROR("Invalid compat nopage.\n");
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	ttm = bo->ttm;
	drm_ttm_fixup_caching(ttm);
	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	page = drm_ttm_get_page(ttm, page_offset);
	if (!page) {
		page = NOPAGE_OOM;
		goto out_unlock;
	}
	get_page(page);
out_unlock:
	mutex_unlock(&bo->mutex);
	return page;
}
int drm_bo_map_bound(struct vm_area_struct *vma)
{
	drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
	int ret = 0;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	BUG_ON(ret);

	if (bus_size) {
		drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
		unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
		pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
					 vma->vm_end - vma->vm_start,
					 pgprot);
	}

	return ret;
}
int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n_entry;
	vma_entry_t *v_entry;
	struct mm_struct *mm = vma->vm_mm;

	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
	if (!v_entry) {
		DRM_ERROR("Allocation of vma pointer entry failed\n");
		return -ENOMEM;
	}
	v_entry->vma = vma;
	list_add_tail(&v_entry->head, &bo->vma_list);

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			atomic_inc(&entry->refcount);
			return 0;
		} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
	}

	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
	if (!n_entry) {
		DRM_ERROR("Allocation of process mm pointer entry failed\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&n_entry->head);
	n_entry->mm = mm;
	n_entry->locked = 0;
	atomic_set(&n_entry->refcount, 0);
	list_add_tail(&n_entry->head, &entry->head);

	return 0;
}
void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n;
	vma_entry_t *v_entry, *v_n;
	int found = 0;
	struct mm_struct *mm = vma->vm_mm;

	list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
		if (v_entry->vma == vma) {
			found = 1;
			list_del(&v_entry->head);
			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
			break;
		}
	}
	BUG_ON(!found);

	list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			if (atomic_add_negative(-1, &entry->refcount)) {
				list_del(&entry->head);
				BUG_ON(entry->locked);
				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
			}
			return;
		}
	}
	BUG_ON(1);
}
int drm_bo_lock_kmm(drm_buffer_object_t * bo)
{
	p_mm_entry_t *entry;
	int lock_ok = 1;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(entry->locked);
		if (!down_write_trylock(&entry->mm->mmap_sem)) {
			lock_ok = 0;
			break;
		}
		entry->locked = 1;
	}

	if (lock_ok)
		return 0;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (!entry->locked)
			break;
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}

	/*
	 * Possible deadlock. Try again. Our callers should handle this
	 * and restart.
	 */

	return -EAGAIN;
}
void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
{
	p_mm_entry_t *entry;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(!entry->locked);
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}
}
int drm_bo_remap_bound(drm_buffer_object_t *bo)
{
	vma_entry_t *v_entry;
	int ret = 0;

	if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
		list_for_each_entry(v_entry, &bo->vma_list, head) {
			ret = drm_bo_map_bound(v_entry->vma);
			if (ret)
				break;
		}
	}

	return ret;
}
void drm_bo_finish_unmap(drm_buffer_object_t *bo)
{
	vma_entry_t *v_entry;

	list_for_each_entry(v_entry, &bo->vma_list, head) {
		v_entry->vma->vm_flags &= ~VM_PFNMAP;