1 /**************************************************************************
3 * This kernel module is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License as
5 * published by the Free Software Foundation; either version 2 of the
6 * License, or (at your option) any later version.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 **************************************************************************/
19 * This code provides access to unexported mm kernel features. It is necessary
20 * to use the new DRM memory manager code with kernels that don't support it
23 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
24 * Linux kernel mm subsystem authors.
25 * (Most code taken from there).
30 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
33 * These have bad performance in the AGP module for the indicated kernel versions.
/*
 * Switch a page's kernel mapping to uncached (PAGE_KERNEL_NOCACHE) so it
 * can safely be used by the AGP aperture.  Returns the change_page_attr()
 * result.  NOTE(review): the return statement and closing brace are not
 * visible in this excerpt — confirm against the full file.
 */
36 int drm_map_page_into_agp(struct page *page)
39 i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
40 /* Caller's responsibility to call global_flush_tlb() for
41 * performance reasons */
/*
 * Inverse of drm_map_page_into_agp(): restore the page's normal cached
 * kernel mapping (PAGE_KERNEL) once it is no longer used by AGP.
 * NOTE(review): return statement not visible in this excerpt.
 */
45 int drm_unmap_page_from_agp(struct page *page)
48 i = change_page_attr(page, 1, PAGE_KERNEL);
49 /* Caller's responsibility to call global_flush_tlb() for
50 * performance reasons */
56 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
59 * The protection map was exported in 2.6.19
/*
 * Compat replacement for vm_get_page_prot() on kernels < 2.6.19, where
 * the kernel's protection_map was not exported.  Maps the low four
 * VM_{READ,WRITE,EXEC,SHARED} bits of vm_flags to a page protection.
 * NOTE(review): the two alternative bodies below are presumably selected
 * by an #ifdef (e.g. MODULE) that is not visible in this excerpt.
 */
62 pgprot_t vm_get_page_prot(unsigned long vm_flags)
65 static pgprot_t drm_protection_map[16] = {
66 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
67 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70 return drm_protection_map[vm_flags & 0x0F];
72 extern pgprot_t protection_map[];
73 return protection_map[vm_flags & 0x0F];
79 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
82 * vm code for kernels below 2.6.15 in which version a major vm write
83 occurred. This implements a simple, straightforward
84 version similar to what's going to be
86 * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
92 struct page *dummy_page;
95 {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
98 static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
99 struct fault_data *data);
/*
 * Lazily allocate the global dummy page (drm_np_retry.dummy_page) used to
 * emulate a "retry" return from a nopage handler, then return it with an
 * extra reference taken (get_page()).  Publication of the page is done
 * under drm_np_retry.lock, with drm_np_retry.present as the fast-path
 * "already allocated" flag.  NOTE(review): the alloc_page() failure check
 * and closing braces are not visible in this excerpt.
 */
102 struct page * get_nopage_retry(void)
104 if (atomic_read(&drm_np_retry.present) == 0) {
105 struct page *page = alloc_page(GFP_KERNEL);
108 spin_lock(&drm_np_retry.lock);
109 drm_np_retry.dummy_page = page;
110 atomic_set(&drm_np_retry.present,1);
111 spin_unlock(&drm_np_retry.lock);
113 get_page(drm_np_retry.dummy_page);
114 return drm_np_retry.dummy_page;
/*
 * Release the global dummy page allocated by get_nopage_retry(), if it
 * exists.  Clears the pointer and the 'present' flag under the same
 * spinlock used for publication.
 */
117 void free_nopage_retry(void)
119 if (atomic_read(&drm_np_retry.present) == 1) {
120 spin_lock(&drm_np_retry.lock);
121 __free_page(drm_np_retry.dummy_page);
122 drm_np_retry.dummy_page = NULL;
123 atomic_set(&drm_np_retry.present, 0);
124 spin_unlock(&drm_np_retry.lock);
/*
 * nopage-style VM entry point for buffer-object mappings (kernels that use
 * ->nopage rather than ->fault).  Delegates the real work to
 * drm_bo_vm_fault() and translates the resulting fault_data.type into the
 * NOPAGE_* codes the old VM expects (SIGBUS -> NOPAGE_SIGBUS, otherwise
 * NOPAGE_REFAULT to make the VM retry).  NOTE(review): the switch header
 * and remaining cases are not visible in this excerpt.
 */
128 struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
129 unsigned long address,
132 struct fault_data data;
135 *type = VM_FAULT_MINOR;
137 data.address = address;
139 drm_bo_vm_fault(vma, &data);
143 case VM_FAULT_SIGBUS:
144 return NOPAGE_SIGBUS;
149 return NOPAGE_REFAULT;
154 #if !defined(DRM_FULL_MM_COMPAT) && \
155 ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
156 (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
/*
 * Walk the page tables (pgd -> pud -> pmd -> pte) for 'addr' in the vma's
 * mm, under mm->page_table_lock, and report whether the PTE is clear
 * (pte_none).  Used by the compat vm_insert_pfn() below to refuse to
 * overwrite an already-populated PTE.  NOTE(review): intermediate
 * present-checks on each level are not visible in this excerpt.
 */
158 static int drm_pte_is_clear(struct vm_area_struct *vma,
161 struct mm_struct *mm = vma->vm_mm;
168 spin_lock(&mm->page_table_lock);
169 pgd = pgd_offset(mm, addr);
172 pud = pud_offset(pgd, addr);
175 pmd = pmd_offset(pud, addr);
178 pte = pte_offset_map(pmd, addr);
181 ret = pte_none(*pte);
184 spin_unlock(&mm->page_table_lock);
/*
 * Compat implementation of vm_insert_pfn() for kernels that lack it:
 * bail out if the PTE is already populated, otherwise map a single page
 * (PAGE_SIZE) at 'addr' with io_remap_pfn_range() using the vma's
 * current page protection.  NOTE(review): the early-return value for a
 * populated PTE is not visible in this excerpt.
 */
189 static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
193 if (!drm_pte_is_clear(vma, addr))
196 ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
/*
 * Core fault handler for buffer-object VMAs, shared by the nopage and
 * nopfn compat entry points.  Under bo->mutex it:
 *   1. waits for the buffer to become idle (drm_bo_wait),
 *   2. moves the buffer into a mappable location if needed,
 *   3. resolves the faulting page either as a PCI pfn (for buffers with a
 *      bus mapping) or as a TTM page (drm_ttm_get_page),
 *   4. inserts the pfn into the page tables via vm_insert_pfn().
 * The outcome is reported through data->type (VM_FAULT_MINOR /
 * VM_FAULT_SIGBUS / VM_FAULT_OOM).  NOTE(review): several lines (error
 * gotos, the PCI/TTM branch header, closing braces) are not visible in
 * this excerpt.
 */
201 static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
202 struct fault_data *data)
204 unsigned long address = data->address;
205 drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
206 unsigned long page_offset;
207 struct page *page = NULL;
212 unsigned long bus_base;
213 unsigned long bus_offset;
214 unsigned long bus_size;
217 mutex_lock(&bo->mutex);
/* Wait for the buffer to be idle; -EAGAIN means "retry the fault". */
219 err = drm_bo_wait(bo, 0, 1, 0);
221 data->type = (err == -EAGAIN) ?
222 VM_FAULT_MINOR : VM_FAULT_SIGBUS;
228 * If buffer happens to be in a non-mappable location,
229 * move it to a mappable.
232 if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
/* Retry the move for up to 3 seconds' worth of jiffies. */
233 unsigned long _end = jiffies + 3*DRM_HZ;
234 uint32_t new_mask = bo->mem.mask |
235 DRM_BO_FLAG_MAPPABLE |
236 DRM_BO_FLAG_FORCE_MAPPABLE;
239 err = drm_bo_move_buffer(bo, new_mask, 0, 0);
240 } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
243 DRM_ERROR("Timeout moving buffer to mappable location.\n");
244 data->type = VM_FAULT_SIGBUS;
/* Fault address outside the vma is an error. */
249 if (address > vma->vm_end) {
250 data->type = VM_FAULT_SIGBUS;
255 err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
259 data->type = VM_FAULT_SIGBUS;
263 page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
/* PCI-mapped buffer: compute the pfn directly from the bus address. */
266 drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
268 pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
269 vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
/* TTM-backed buffer: fetch (possibly allocating) the backing page. */
273 drm_ttm_fixup_caching(ttm);
274 page = drm_ttm_get_page(ttm, page_offset);
276 data->type = VM_FAULT_OOM;
279 pfn = page_to_pfn(page);
280 vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
281 vm_get_page_prot(vma->vm_flags) :
282 drm_io_prot(_DRM_TTM, vma);
/* -EBUSY means the PTE was populated by a racing fault: still a success. */
285 err = vm_insert_pfn(vma, address, pfn);
287 if (!err || err == -EBUSY)
288 data->type = VM_FAULT_MINOR;
290 data->type = VM_FAULT_OOM;
292 mutex_unlock(&bo->mutex);
298 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
299 !defined(DRM_FULL_MM_COMPAT)
/*
 * nopfn-style VM entry point (kernels >= 2.6.19 without full mm compat).
 * Delegates to drm_bo_vm_fault() and translates fault_data.type into the
 * corresponding NOPFN_* return values.  NOTE(review): the actual return
 * statements for the OOM/SIGBUS/success cases are not visible in this
 * excerpt.
 */
304 unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
305 unsigned long address)
307 struct fault_data data;
308 data.address = address;
310 (void) drm_bo_vm_fault(vma, &data);
311 if (data.type == VM_FAULT_OOM)
313 else if (data.type == VM_FAULT_SIGBUS)
325 #ifdef DRM_ODD_MM_COMPAT
328 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
329 * workaround for a single BUG statement in do_no_page in these versions. The
330 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
331 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
332 first take the dev->struct_mutex, and then trylock all mmap_sems. If this
333 * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
334 * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
/*
 * Per-mm bookkeeping entry: one node per mm_struct that has a vma mapping
 * the buffer object, linked on bo->p_mm_list.  NOTE(review): additional
 * fields used elsewhere in this file (a refcount and a 'locked' flag) are
 * not visible in this excerpt.
 */
338 typedef struct p_mm_entry {
339 struct list_head head;
340 struct mm_struct *mm;
/*
 * Per-vma bookkeeping entry, linked on bo->vma_list; tracks every vma
 * currently mapping the buffer object.
 */
345 typedef struct vma_entry {
346 struct list_head head;
347 struct vm_area_struct *vma;
/*
 * DRM_ODD_MM_COMPAT (2.6.15-2.6.18) nopage handler.  Only TTM-backed
 * buffers are handled here: a PCI-mapped buffer reaching this path is a
 * bug (bound mappings are handled by drm_bo_map_bound() instead), hence
 * the NOPAGE_SIGBUS.  Returns the TTM page for the faulting offset.
 * NOTE(review): get_page()/reference handling on the returned page is
 * not visible in this excerpt.
 */
351 struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
352 unsigned long address,
355 drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
356 unsigned long page_offset;
361 mutex_lock(&bo->mutex);
364 *type = VM_FAULT_MINOR;
/* Fault address outside the vma is an error. */
366 if (address > vma->vm_end) {
367 page = NOPAGE_SIGBUS;
373 if (drm_mem_reg_is_pci(dev, &bo->mem)) {
374 DRM_ERROR("Invalid compat nopage.\n");
375 page = NOPAGE_SIGBUS;
380 drm_ttm_fixup_caching(ttm);
381 page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
382 page = drm_ttm_get_page(ttm, page_offset);
390 mutex_unlock(&bo->mutex);
/*
 * Map a bound (PCI-accessible) buffer object into a vma in one shot:
 * if drm_bo_pci_offset() reports a bus mapping, remap the whole vma
 * range onto the corresponding pfn range with io_remap_pfn_range(),
 * using the protection appropriate for the memory manager's bus map
 * type.  NOTE(review): the condition guarding the remap and the final
 * return are not visible in this excerpt.
 */
397 int drm_bo_map_bound(struct vm_area_struct *vma)
399 drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
401 unsigned long bus_base;
402 unsigned long bus_offset;
403 unsigned long bus_size;
405 ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
406 &bus_offset, &bus_size);
410 drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
411 unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
412 pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
413 ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
414 vma->vm_end - vma->vm_start,
/*
 * Register a vma as mapping this buffer object.  Adds a vma_entry to
 * bo->vma_list, and maintains bo->p_mm_list so that each distinct
 * mm_struct appears once: an existing entry's refcount is bumped,
 * otherwise a new p_mm_entry is allocated and inserted (the list appears
 * to be kept sorted by mm pointer, per the comparison below).
 * NOTE(review): allocation-failure returns, the new entry's mm/refcount
 * setup, and loop-exit logic are partly missing from this excerpt —
 * confirm against the full file.
 */
422 int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
424 p_mm_entry_t *entry, *n_entry;
425 vma_entry_t *v_entry;
426 struct mm_struct *mm = vma->vm_mm;
428 v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
430 DRM_ERROR("Allocation of vma pointer entry failed\n");
435 list_add_tail(&v_entry->head, &bo->vma_list);
437 list_for_each_entry(entry, &bo->p_mm_list, head) {
438 if (mm == entry->mm) {
439 atomic_inc(&entry->refcount);
441 } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
444 n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
446 DRM_ERROR("Allocation of process mm pointer entry failed\n");
449 INIT_LIST_HEAD(&n_entry->head);
452 atomic_set(&n_entry->refcount, 0);
453 list_add_tail(&n_entry->head, &entry->head);
/*
 * Unregister a vma from this buffer object: remove and free its
 * vma_entry, then drop the matching mm's refcount on bo->p_mm_list,
 * freeing the p_mm_entry when the count goes negative (last reference).
 * The BUG_ON(entry->locked) guards against freeing an entry whose
 * mmap_sem is still held via drm_bo_lock_kmm().
 */
458 void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
460 p_mm_entry_t *entry, *n;
461 vma_entry_t *v_entry, *v_n;
463 struct mm_struct *mm = vma->vm_mm;
465 list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
466 if (v_entry->vma == vma) {
468 list_del(&v_entry->head);
469 drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
475 list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
476 if (mm == entry->mm) {
477 if (atomic_add_negative(-1, &entry->refcount)) {
478 list_del(&entry->head);
479 BUG_ON(entry->locked);
480 drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
/*
 * Try to take the mmap_sem of every mm mapping this buffer object, in
 * exclusive (write) mode, without blocking.  If any trylock fails, all
 * sems acquired so far are released and the caller is expected to retry
 * (see the deadlock comment below).  NOTE(review): the 'locked' flag
 * updates and the return values are not visible in this excerpt.
 */
490 int drm_bo_lock_kmm(drm_buffer_object_t * bo)
495 list_for_each_entry(entry, &bo->p_mm_list, head) {
496 BUG_ON(entry->locked);
497 if (!down_write_trylock(&entry->mm->mmap_sem)) {
/* Roll back: release every mmap_sem taken before the failure. */
507 list_for_each_entry(entry, &bo->p_mm_list, head) {
510 up_write(&entry->mm->mmap_sem);
515 * Possible deadlock. Try again. Our callers should handle this
/*
 * Release the mmap_sem of every mm mapping this buffer object, taken
 * earlier by a successful drm_bo_lock_kmm().  The BUG_ON asserts each
 * entry really was locked.
 */
522 void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
526 list_for_each_entry(entry, &bo->p_mm_list, head) {
527 BUG_ON(!entry->locked);
528 up_write(&entry->mm->mmap_sem);
/*
 * Re-establish the linear PCI mapping for every vma tracked on
 * bo->vma_list, after the buffer has (re)acquired a PCI-accessible
 * location.  Only applies to PCI-mapped buffers.  NOTE(review): error
 * handling for a failed drm_bo_map_bound() and the return are not
 * visible in this excerpt.
 */
533 int drm_bo_remap_bound(drm_buffer_object_t *bo)
535 vma_entry_t *v_entry;
538 if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
539 list_for_each_entry(v_entry, &bo->vma_list, head) {
540 ret = drm_bo_map_bound(v_entry->vma);
549 void drm_bo_finish_unmap(drm_buffer_object_t *bo)
551 vma_entry_t *v_entry;
553 list_for_each_entry(v_entry, &bo->vma_list, head) {
554 v_entry->vma->vm_flags &= ~VM_PFNMAP;