/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These have bad performance in the AGP module for the indicated kernel versions.
 */

int drm_map_page_into_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}
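
/*
 * Illustrative usage sketch only (the loop below is hypothetical, not part
 * of the AGP code): the TLB flush is left to the caller so that a whole
 * batch of pages can be remapped with a single global flush, e.g.
 *
 *      for (i = 0; i < num_pages; ++i)
 *              drm_map_page_into_agp(pages[i]);
 *      global_flush_tlb();
 */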
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
        static pgprot_t drm_protection_map[16] = {
                __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
        };

        return drm_protection_map[vm_flags & 0x0F];
#else
        extern pgprot_t protection_map[];
        return protection_map[vm_flags & 0x0F];
#endif
}
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * vm code for kernels below 2.6.15, in which version a major vm rewrite
 * occurred. This implements a simple, straightforward version similar to
 * what is used in kernels 2.6.19 and above.
 * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
 * nopfn.
 */
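
/*
 * Illustrative sketch only (the initializer below is a rough guide, not part
 * of this file): the handlers defined in these compat sections are hooked
 * into the buffer-object vm_operations_struct elsewhere in the DRM code,
 * roughly as
 *
 *      static struct vm_operations_struct drm_bo_vm_ops = {
 *      #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
 *              .nopfn = drm_bo_vm_nopfn,
 *      #else
 *              .nopage = drm_bo_vm_nopage,
 *      #endif
 *      };
 *
 * The exact wiring, and the .open/.close handlers, live outside this file.
 */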
static struct {
        spinlock_t lock;
        struct page *dummy_page;
        atomic_t present;
} drm_np_retry =
        {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                                    struct fault_data *data);
struct page *get_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 0) {
                struct page *page = alloc_page(GFP_KERNEL);
                if (!page)
                        return NOPAGE_OOM;
                spin_lock(&drm_np_retry.lock);
                drm_np_retry.dummy_page = page;
                atomic_set(&drm_np_retry.present, 1);
                spin_unlock(&drm_np_retry.lock);
        }
        get_page(drm_np_retry.dummy_page);
        return drm_np_retry.dummy_page;
}
void free_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 1) {
                spin_lock(&drm_np_retry.lock);
                __free_page(drm_np_retry.dummy_page);
                drm_np_retry.dummy_page = NULL;
                atomic_set(&drm_np_retry.present, 0);
                spin_unlock(&drm_np_retry.lock);
        }
}
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                              unsigned long address,
                              int *type)
{
        struct fault_data data;

        if (type)
                *type = VM_FAULT_MINOR;

        data.address = address;
        drm_bo_vm_fault(vma, &data);

        switch (data.type) {
        case VM_FAULT_OOM:
                return NOPAGE_OOM;
        case VM_FAULT_SIGBUS:
                return NOPAGE_SIGBUS;
        default:
                break;
        }

        return NOPAGE_REFAULT;
}
#endif
#if !defined(DRM_FULL_MM_COMPAT) && \
  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
static int drm_pte_is_clear(struct vm_area_struct *vma,
                            unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int ret = 1;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;

        spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto unlock;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                goto unlock;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                goto unlock;
        pte = pte_offset_map(pmd, addr);
        if (!pte)
                goto unlock;
        ret = pte_none(*pte);
        pte_unmap(pte);
unlock:
        spin_unlock(&mm->page_table_lock);
        return ret;
}
static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                         unsigned long pfn)
{
        int ret;

        if (!drm_pte_is_clear(vma, addr))
                return -EBUSY;

        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
        return ret;
}
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                                    struct fault_data *data)
{
        unsigned long address = data->address;
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page = NULL;
        drm_ttm_t *ttm;
        drm_device_t *dev;
        unsigned long pfn;
        int err;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        mutex_lock(&bo->mutex);

        /* Wait for outstanding operations on the buffer to complete. */
        err = drm_bo_wait(bo, 0, 1, 0);
        if (err) {
                data->type = (err == -EAGAIN) ?
                        VM_FAULT_MINOR : VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        /*
         * If buffer happens to be in a non-mappable location,
         * move it to a mappable.
         */

        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
                unsigned long _end = jiffies + 3*DRM_HZ;
                uint32_t new_mask = bo->mem.mask |
                        DRM_BO_FLAG_MAPPABLE |
                        DRM_BO_FLAG_FORCE_MAPPABLE;

                do {
                        err = drm_bo_move_buffer(bo, new_mask, 0, 0);
                } while ((err == -EAGAIN) && !time_after_eq(jiffies, _end));

                if (err) {
                        DRM_ERROR("Timeout moving buffer to mappable location.\n");
                        data->type = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        if (address > vma->vm_end) {
                data->type = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        dev = bo->dev;
        err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
                                &bus_size);
        if (err) {
                data->type = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

        if (bus_size) {
                drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];

                pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
                vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
        } else {
                ttm = bo->ttm;

                drm_ttm_fixup_caching(ttm);
                page = drm_ttm_get_page(ttm, page_offset);
                if (!page) {
                        data->type = VM_FAULT_OOM;
                        goto out_unlock;
                }
                pfn = page_to_pfn(page);
                vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
                        vm_get_page_prot(vma->vm_flags) :
                        drm_io_prot(_DRM_TTM, vma);
        }

        err = vm_insert_pfn(vma, address, pfn);

        if (!err || err == -EBUSY)
                data->type = VM_FAULT_MINOR;
        else
                data->type = VM_FAULT_OOM;
out_unlock:
        mutex_unlock(&bo->mutex);
        return NULL;
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
  !defined(DRM_FULL_MM_COMPAT)

/*
 * nopfn() handler for kernels 2.6.19 and above that lack the full
 * fault() compatibility code.
 */

unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
                              unsigned long address)
{
        struct fault_data data;
        data.address = address;

        (void) drm_bo_vm_fault(vma, &data);
        if (data.type == VM_FAULT_OOM)
                return NOPFN_OOM;
        else if (data.type == VM_FAULT_SIGBUS)
                return NOPFN_SIGBUS;

        /*
         * The pfn was already inserted by drm_bo_vm_fault().
         */

        return 0;
}
#endif
#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
 * is to first take the dev->struct_mutex and then trylock all mmap_sems. If
 * this fails for a single mmap_sem, we have to release all sems and the
 * dev->struct_mutex, release the cpu and retry. We also need to keep track of
 * all vmas mapping the ttm.
 */
typedef struct p_mm_entry {
        struct list_head head;
        struct mm_struct *mm;
        atomic_t refcount;
        int locked;
} p_mm_entry_t;

typedef struct vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
} vma_entry_t;
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                              unsigned long address,
                              int *type)
{
        drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page;
        drm_ttm_t *ttm;
        drm_device_t *dev;

        mutex_lock(&bo->mutex);

        if (type)
                *type = VM_FAULT_MINOR;

        if (address > vma->vm_end) {
                page = NOPAGE_SIGBUS;
                goto out_unlock;
        }

        dev = bo->dev;

        if (drm_mem_reg_is_pci(dev, &bo->mem)) {
                DRM_ERROR("Invalid compat nopage.\n");
                page = NOPAGE_SIGBUS;
                goto out_unlock;
        }

        ttm = bo->ttm;
        drm_ttm_fixup_caching(ttm);
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
        page = drm_ttm_get_page(ttm, page_offset);
        if (!page) {
                page = NOPAGE_OOM;
                goto out_unlock;
        }

        get_page(page);
out_unlock:
        mutex_unlock(&bo->mutex);
        return page;
}
int drm_bo_map_bound(struct vm_area_struct *vma)
{
        drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
        int ret = 0;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);
        BUG_ON(ret);

        if (bus_size) {
                drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
                unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
                pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
                ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
                                         vma->vm_end - vma->vm_start,
                                         pgprot);
        }

        return ret;
}
int drm_bo_add_vma(drm_buffer_object_t *bo, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n_entry;
        vma_entry_t *v_entry;
        struct mm_struct *mm = vma->vm_mm;

        v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
        if (!v_entry) {
                DRM_ERROR("Allocation of vma pointer entry failed\n");
                return -ENOMEM;
        }
        v_entry->vma = vma;

        list_add_tail(&v_entry->head, &bo->vma_list);

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                if (mm == entry->mm) {
                        atomic_inc(&entry->refcount);
                        return 0;
                } else if ((unsigned long)mm < (unsigned long)entry->mm)
                        break;
        }

        n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
        if (!n_entry) {
                DRM_ERROR("Allocation of process mm pointer entry failed\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&n_entry->head);
        n_entry->mm = mm;
        n_entry->locked = 0;
        atomic_set(&n_entry->refcount, 0);

        /* Insert before "entry" to keep the list sorted by mm pointer;
         * if the loop ran to completion this adds at the list tail. */
        list_add_tail(&n_entry->head, &entry->head);

        return 0;
}
void drm_bo_delete_vma(drm_buffer_object_t *bo, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n;
        vma_entry_t *v_entry, *v_n;
        int found = 0;
        struct mm_struct *mm = vma->vm_mm;

        list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
                if (v_entry->vma == vma) {
                        found = 1;
                        list_del(&v_entry->head);
                        drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
                        break;
                }
        }
        BUG_ON(!found);

        list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
                if (mm == entry->mm) {
                        if (atomic_add_negative(-1, &entry->refcount)) {
                                list_del(&entry->head);
                                BUG_ON(entry->locked);
                                drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
                        }
                        return;
                }
        }
        BUG_ON(1);
}
int drm_bo_lock_kmm(drm_buffer_object_t *bo)
{
        p_mm_entry_t *entry;
        int lock_ok = 1;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(entry->locked);
                if (!down_write_trylock(&entry->mm->mmap_sem)) {
                        lock_ok = 0;
                        break;
                }
                entry->locked = 1;
        }

        if (lock_ok)
                return 0;

        /* Back out the sems we did manage to take. */
        list_for_each_entry(entry, &bo->p_mm_list, head) {
                if (!entry->locked)
                        break;
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }

        /*
         * Possible deadlock. Try again. Our callers should handle this
         * and restart the operation.
         */

        return -EAGAIN;
}
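
/*
 * Illustrative caller pattern only (a hedged sketch; the real callers live
 * elsewhere in the DRM buffer-object code and may differ in detail):
 *
 *      do {
 *              mutex_lock(&dev->struct_mutex);
 *              ret = drm_bo_lock_kmm(bo);
 *              if (ret == -EAGAIN) {
 *                      mutex_unlock(&dev->struct_mutex);
 *                      schedule();
 *              }
 *      } while (ret == -EAGAIN);
 *
 *      ... operate on the ttm with all mmap_sems held exclusively ...
 *
 *      drm_bo_unlock_kmm(bo);
 *      mutex_unlock(&dev->struct_mutex);
 */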
void drm_bo_unlock_kmm(drm_buffer_object_t *bo)
{
        p_mm_entry_t *entry;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(!entry->locked);
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }
}
int drm_bo_remap_bound(drm_buffer_object_t *bo)
{
        vma_entry_t *v_entry;
        int ret = 0;

        if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
                list_for_each_entry(v_entry, &bo->vma_list, head) {
                        ret = drm_bo_map_bound(v_entry->vma);
                        if (ret)
                                break;
                }
        }

        return ret;
}
void drm_bo_finish_unmap(drm_buffer_object_t *bo)
{
        vma_entry_t *v_entry;

        list_for_each_entry(v_entry, &bo->vma_list, head) {
                v_entry->vma->vm_flags &= ~VM_PFNMAP;
        }
}

#endif