1 /**************************************************************************
3 * This kernel module is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License as
5 * published by the Free Software Foundation; either version 2 of the
6 * License, or (at your option) any later version.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */
30 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
 * These have bad performance in the AGP module for the indicated kernel
 * versions, so we provide local replacements here.
 */
36 int drm_map_page_into_agp(struct page *page)
39 i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
40 /* Caller's responsibility to call global_flush_tlb() for
41 * performance reasons */
45 int drm_unmap_page_from_agp(struct page *page)
48 i = change_page_attr(page, 1, PAGE_KERNEL);
49 /* Caller's responsibility to call global_flush_tlb() for
50 * performance reasons */
56 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
/*
 * The protection map was exported in 2.6.19; earlier kernels need this
 * local implementation.
 */
62 pgprot_t vm_get_page_prot(unsigned long vm_flags)
65 static pgprot_t drm_protection_map[16] = {
66 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
67 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70 return drm_protection_map[vm_flags & 0x0F];
72 extern pgprot_t protection_map[];
73 return protection_map[vm_flags & 0x0F];
79 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
 * vm code for kernels below 2.6.15 in which version a major vm write
 * occured. This implement a simple straightforward
 * version similar to what's going to be
 * in kernel 2.6.19+.
 * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
 * fault.
 */
92 struct page *dummy_page;
95 {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
98 static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
99 struct fault_data *data);
102 struct page * get_nopage_retry(void)
104 if (atomic_read(&drm_np_retry.present) == 0) {
105 struct page *page = alloc_page(GFP_KERNEL);
108 spin_lock(&drm_np_retry.lock);
109 drm_np_retry.dummy_page = page;
110 atomic_set(&drm_np_retry.present,1);
111 spin_unlock(&drm_np_retry.lock);
113 get_page(drm_np_retry.dummy_page);
114 return drm_np_retry.dummy_page;
117 void free_nopage_retry(void)
119 if (atomic_read(&drm_np_retry.present) == 1) {
120 spin_lock(&drm_np_retry.lock);
121 __free_page(drm_np_retry.dummy_page);
122 drm_np_retry.dummy_page = NULL;
123 atomic_set(&drm_np_retry.present, 0);
124 spin_unlock(&drm_np_retry.lock);
128 struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
129 unsigned long address,
132 struct fault_data data;
135 *type = VM_FAULT_MINOR;
137 data.address = address;
139 drm_bo_vm_fault(vma, &data);
143 case VM_FAULT_SIGBUS:
144 return NOPAGE_SIGBUS;
149 return NOPAGE_REFAULT;
154 #if !defined(DRM_FULL_MM_COMPAT) && \
155 ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
156 (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
158 static int drm_pte_is_clear(struct vm_area_struct *vma,
161 struct mm_struct *mm = vma->vm_mm;
168 spin_lock(&mm->page_table_lock);
169 pgd = pgd_offset(mm, addr);
172 pud = pud_offset(pgd, addr);
175 pmd = pmd_offset(pud, addr);
178 pte = pte_offset_map(pmd, addr);
181 ret = pte_none(*pte);
184 spin_unlock(&mm->page_table_lock);
188 static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
192 if (!drm_pte_is_clear(vma, addr))
195 ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
199 static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
200 struct fault_data *data)
202 unsigned long address = data->address;
203 drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
204 unsigned long page_offset;
205 struct page *page = NULL;
210 unsigned long bus_base;
211 unsigned long bus_offset;
212 unsigned long bus_size;
215 mutex_lock(&bo->mutex);
217 err = drm_bo_wait(bo, 0, 1, 0);
219 data->type = (err == -EAGAIN) ?
220 VM_FAULT_MINOR : VM_FAULT_SIGBUS;
226 * If buffer happens to be in a non-mappable location,
227 * move it to a mappable.
230 if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
231 unsigned long _end = jiffies + 3*DRM_HZ;
232 uint32_t new_mask = bo->mem.mask |
233 DRM_BO_FLAG_MAPPABLE |
234 DRM_BO_FLAG_FORCE_MAPPABLE;
237 err = drm_bo_move_buffer(bo, new_mask, 0, 0);
238 } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
241 DRM_ERROR("Timeout moving buffer to mappable location.\n");
242 data->type = VM_FAULT_SIGBUS;
247 if (address > vma->vm_end) {
248 data->type = VM_FAULT_SIGBUS;
253 err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
257 data->type = VM_FAULT_SIGBUS;
261 page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
264 drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
266 pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
267 vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
271 drm_ttm_fixup_caching(ttm);
272 page = drm_ttm_get_page(ttm, page_offset);
274 data->type = VM_FAULT_OOM;
277 pfn = page_to_pfn(page);
278 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
281 err = vm_insert_pfn(vma, address, pfn);
283 if (!err || err == -EBUSY)
284 data->type = VM_FAULT_MINOR;
286 data->type = VM_FAULT_OOM;
288 mutex_unlock(&bo->mutex);
294 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
295 !defined(DRM_FULL_MM_COMPAT)
300 unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
301 unsigned long address)
303 struct fault_data data;
304 data.address = address;
306 (void) drm_bo_vm_fault(vma, &data);
307 if (data.type == VM_FAULT_OOM)
309 else if (data.type == VM_FAULT_SIGBUS)
321 #ifdef DRM_ODD_MM_COMPAT
/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
 * check first take the dev->struct_mutex, and then trylock all mmap_sems. If this
 * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
 * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
 */
334 typedef struct p_mm_entry {
335 struct list_head head;
336 struct mm_struct *mm;
341 typedef struct vma_entry {
342 struct list_head head;
343 struct vm_area_struct *vma;
347 struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
348 unsigned long address,
351 drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
352 unsigned long page_offset;
357 mutex_lock(&bo->mutex);
360 *type = VM_FAULT_MINOR;
362 if (address > vma->vm_end) {
363 page = NOPAGE_SIGBUS;
369 if (drm_mem_reg_is_pci(dev, &bo->mem)) {
370 DRM_ERROR("Invalid compat nopage.\n");
371 page = NOPAGE_SIGBUS;
376 drm_ttm_fixup_caching(ttm);
377 page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
378 page = drm_ttm_get_page(ttm, page_offset);
386 mutex_unlock(&bo->mutex);
393 int drm_bo_map_bound(struct vm_area_struct *vma)
395 drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
397 unsigned long bus_base;
398 unsigned long bus_offset;
399 unsigned long bus_size;
401 ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
402 &bus_offset, &bus_size);
406 drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
407 unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
408 pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
409 ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
410 vma->vm_end - vma->vm_start,
418 int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
420 p_mm_entry_t *entry, *n_entry;
421 vma_entry_t *v_entry;
422 struct mm_struct *mm = vma->vm_mm;
424 v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
426 DRM_ERROR("Allocation of vma pointer entry failed\n");
431 list_add_tail(&v_entry->head, &bo->vma_list);
433 list_for_each_entry(entry, &bo->p_mm_list, head) {
434 if (mm == entry->mm) {
435 atomic_inc(&entry->refcount);
437 } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
440 n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
442 DRM_ERROR("Allocation of process mm pointer entry failed\n");
445 INIT_LIST_HEAD(&n_entry->head);
448 atomic_set(&n_entry->refcount, 0);
449 list_add_tail(&n_entry->head, &entry->head);
454 void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
456 p_mm_entry_t *entry, *n;
457 vma_entry_t *v_entry, *v_n;
459 struct mm_struct *mm = vma->vm_mm;
461 list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
462 if (v_entry->vma == vma) {
464 list_del(&v_entry->head);
465 drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
471 list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
472 if (mm == entry->mm) {
473 if (atomic_add_negative(-1, &entry->refcount)) {
474 list_del(&entry->head);
475 BUG_ON(entry->locked);
476 drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
486 int drm_bo_lock_kmm(drm_buffer_object_t * bo)
491 list_for_each_entry(entry, &bo->p_mm_list, head) {
492 BUG_ON(entry->locked);
493 if (!down_write_trylock(&entry->mm->mmap_sem)) {
503 list_for_each_entry(entry, &bo->p_mm_list, head) {
506 up_write(&entry->mm->mmap_sem);
511 * Possible deadlock. Try again. Our callers should handle this
518 void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
522 list_for_each_entry(entry, &bo->p_mm_list, head) {
523 BUG_ON(!entry->locked);
524 up_write(&entry->mm->mmap_sem);
529 int drm_bo_remap_bound(drm_buffer_object_t *bo)
531 vma_entry_t *v_entry;
534 if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
535 list_for_each_entry(v_entry, &bo->vma_list, head) {
536 ret = drm_bo_map_bound(v_entry->vma);
545 void drm_bo_finish_unmap(drm_buffer_object_t *bo)
547 vma_entry_t *v_entry;
549 list_for_each_entry(v_entry, &bo->vma_list, head) {
550 v_entry->vma->vm_flags &= ~VM_PFNMAP;