/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These have bad performance in the AGP module for the indicated kernel versions.
 */

int drm_map_page_into_agp(struct page *page)
{
	int i;
	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
	int i;
	i = change_page_attr(page, 1, PAGE_KERNEL);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
#endif
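/*
 * Usage sketch (illustrative, not from the original file): because the
 * wrappers above deliberately leave the TLB flush to the caller, a caller
 * changing the caching attributes of many pages would batch a single flush
 * after the loop, along the lines of
 *
 *	int i;
 *	for (i = 0; i < num_pages; ++i)
 *		drm_map_page_into_agp(pages[i]);
 *	global_flush_tlb();
 *
 * (pages[] and num_pages are hypothetical here) rather than paying for one
 * global TLB flush per page.
 */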
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
	static pgprot_t drm_protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};

	return drm_protection_map[vm_flags & 0x0F];
#else
	extern pgprot_t protection_map[];
	return protection_map[vm_flags & 0x0F];
#endif
}
#endif
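/*
 * Example (sketch only): the low four bits of vm_flags are VM_READ, VM_WRITE,
 * VM_EXEC and VM_SHARED, which is why the tables above are indexed with
 * "vm_flags & 0x0F". A private read/write mapping therefore resolves to the
 * __P011 entry:
 *
 *	pgprot_t prot = vm_get_page_prot(VM_READ | VM_WRITE);
 */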
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * vm code for kernels below 2.6.15, in which version a major vm rewrite
 * occurred. This implements a simple, straightforward version similar to
 * what's going to be in kernel 2.6.19+.
 * Kernels below 2.6.15 use nopage, whereas 2.6.19 and upwards use nopfn.
 */
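/*
 * Illustrative sketch (not part of this file): the buffer-object vm
 * operations are wired up differently depending on the kernel, roughly:
 *
 *	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 *	static struct vm_operations_struct drm_bo_vm_ops = {
 *		.nopage = drm_bo_vm_nopage,	(handler returns a struct page *)
 *		.open	= drm_bo_vm_open,
 *		.close	= drm_bo_vm_close,
 *	};
 *	#else
 *	static struct vm_operations_struct drm_bo_vm_ops = {
 *		.nopfn	= drm_bo_vm_nopfn,	(handler returns a pfn / NOPFN_*)
 *		.open	= drm_bo_vm_open,
 *		.close	= drm_bo_vm_close,
 *	};
 *	#endif
 *
 * The real table lives elsewhere (drm_vm.c); drm_bo_vm_open/close are
 * assumed names used only for this sketch.
 */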
static struct {
	spinlock_t lock;
	struct page *dummy_page;
	atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};

static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
				    struct fault_data *data);
struct page *get_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 0) {
		struct page *page = alloc_page(GFP_KERNEL);
		if (!page)
			return NOPAGE_OOM;
		spin_lock(&drm_np_retry.lock);
		drm_np_retry.dummy_page = page;
		atomic_set(&drm_np_retry.present, 1);
		spin_unlock(&drm_np_retry.lock);
	}
	get_page(drm_np_retry.dummy_page);
	return drm_np_retry.dummy_page;
}
void free_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 1) {
		spin_lock(&drm_np_retry.lock);
		__free_page(drm_np_retry.dummy_page);
		drm_np_retry.dummy_page = NULL;
		atomic_set(&drm_np_retry.present, 0);
		spin_unlock(&drm_np_retry.lock);
	}
}
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address,
			      int *type)
{
	struct fault_data data;

	if (type)
		*type = VM_FAULT_MINOR;

	data.address = address;
	data.vma = vma;
	drm_bo_vm_fault(vma, &data);
	switch (data.type) {
	case VM_FAULT_OOM:
		return NOPAGE_OOM;
	case VM_FAULT_SIGBUS:
		return NOPAGE_SIGBUS;
	default:
		break;
	}

	return NOPAGE_REFAULT;
}

#endif
#if !defined(DRM_FULL_MM_COMPAT) && \
  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
static int drm_pte_is_clear(struct vm_area_struct *vma,
			    unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 1;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;

	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto unlock;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		goto unlock;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		goto unlock;
	pte = pte_offset_map(pmd, addr);
	if (!pte)
		goto unlock;
	ret = pte_none(*pte);
	pte_unmap(pte);
unlock:
	spin_unlock(&mm->page_table_lock);
	return ret;
}
static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			 unsigned long pfn)
{
	int ret;

	if (!drm_pte_is_clear(vma, addr))
		return -EBUSY;

	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
	return ret;
}
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
				    struct fault_data *data)
{
	unsigned long address = data->address;
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page = NULL;
	struct drm_ttm *ttm;
	struct drm_device *dev;
	unsigned long pfn;
	int err;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	dev = bo->dev;
	mutex_lock(&bo->mutex);

	err = drm_bo_wait(bo, 0, 1, 0);
	if (err) {
		data->type = (err == -EAGAIN) ?
			VM_FAULT_MINOR : VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * If buffer happens to be in a non-mappable location,
	 * move it to a mappable.
	 */

	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
		unsigned long _end = jiffies + 3*DRM_HZ;
		uint32_t new_mask = bo->mem.mask |
			DRM_BO_FLAG_MAPPABLE |
			DRM_BO_FLAG_FORCE_MAPPABLE;

		do {
			err = drm_bo_move_buffer(bo, new_mask, 0, 0);
		} while ((err == -EAGAIN) && !time_after_eq(jiffies, _end));

		if (err) {
			DRM_ERROR("Timeout moving buffer to mappable location.\n");
			data->type = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	if (address > vma->vm_end) {
		data->type = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);
	if (err) {
		data->type = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

	if (bus_size) {
		drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];

		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
	} else {
		ttm = bo->ttm;

		drm_ttm_fixup_caching(ttm);
		page = drm_ttm_get_page(ttm, page_offset);
		if (!page) {
			data->type = VM_FAULT_OOM;
			goto out_unlock;
		}
		pfn = page_to_pfn(page);
		vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
			vm_get_page_prot(vma->vm_flags) :
			drm_io_prot(_DRM_TTM, vma);
	}

	err = vm_insert_pfn(vma, address, pfn);
	if (!err || err == -EBUSY)
		data->type = VM_FAULT_MINOR;
	else
		data->type = VM_FAULT_OOM;
out_unlock:
	mutex_unlock(&bo->mutex);
	return NULL;
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
  !defined(DRM_FULL_MM_COMPAT)

/*
 * nopfn() wrapper for kernels 2.6.19 and up that lack full mm compat:
 * the real work is done in drm_bo_vm_fault() above.
 */

unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
			      unsigned long address)
{
	struct fault_data data;
	data.address = address;
	data.vma = vma;

	(void) drm_bo_vm_fault(vma, &data);
	if (data.type == VM_FAULT_OOM)
		return NOPFN_OOM;
	else if (data.type == VM_FAULT_SIGBUS)
		return NOPFN_SIGBUS;

	/*
	 * The pte has already been inserted by drm_bo_vm_fault().
	 */

	return 0;
}
#endif
#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
 * is to first take the dev->struct_mutex, and then trylock all mmap_sems. If this
 * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
 * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
 */
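/*
 * Illustrative sketch (assumed caller, not from this file): a buffer move
 * under DRM_ODD_MM_COMPAT would typically drive the trylock dance described
 * above through drm_bo_lock_kmm() below, roughly:
 *
 *	do {
 *		ret = drm_bo_lock_kmm(bo);
 *		if (ret == -EAGAIN) {
 *			mutex_unlock(&dev->struct_mutex);
 *			schedule();
 *			mutex_lock(&dev->struct_mutex);
 *		}
 *	} while (ret == -EAGAIN);
 *
 * i.e. on -EAGAIN all mmap_sems have already been dropped again, so the
 * caller releases dev->struct_mutex, yields the cpu and retries.
 */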
typedef struct p_mm_entry {
	struct list_head head;
	struct mm_struct *mm;
	atomic_t refcount;
	int locked;
} p_mm_entry_t;

typedef struct vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
} vma_entry_t;
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address,
			      int *type)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page;
	struct drm_ttm *ttm;
	struct drm_device *dev;

	mutex_lock(&bo->mutex);

	if (type)
		*type = VM_FAULT_MINOR;

	if (address > vma->vm_end) {
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	dev = bo->dev;

	if (drm_mem_reg_is_pci(dev, &bo->mem)) {
		DRM_ERROR("Invalid compat nopage.\n");
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	ttm = bo->ttm;
	drm_ttm_fixup_caching(ttm);
	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	page = drm_ttm_get_page(ttm, page_offset);
	if (!page) {
		page = NOPAGE_OOM;
		goto out_unlock;
	}

	get_page(page);
out_unlock:
	mutex_unlock(&bo->mutex);
	return page;
}
int drm_bo_map_bound(struct vm_area_struct *vma)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
	int ret = 0;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	BUG_ON(ret);

	if (bus_size) {
		drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
		unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
		pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
					 vma->vm_end - vma->vm_start,
					 pgprot);
	}

	return ret;
}
423 p_mm_entry_t *entry, *n_entry;
424 vma_entry_t *v_entry;
425 struct mm_struct *mm = vma->vm_mm;
427 v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
429 DRM_ERROR("Allocation of vma pointer entry failed\n");
434 list_add_tail(&v_entry->head, &bo->vma_list);
436 list_for_each_entry(entry, &bo->p_mm_list, head) {
437 if (mm == entry->mm) {
438 atomic_inc(&entry->refcount);
440 } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
443 n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
445 DRM_ERROR("Allocation of process mm pointer entry failed\n");
448 INIT_LIST_HEAD(&n_entry->head);
451 atomic_set(&n_entry->refcount, 0);
452 list_add_tail(&n_entry->head, &entry->head);
void drm_bo_delete_vma(struct drm_buffer_object *bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n;
	vma_entry_t *v_entry, *v_n;
	int found = 0;
	struct mm_struct *mm = vma->vm_mm;

	list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
		if (v_entry->vma == vma) {
			found = 1;
			list_del(&v_entry->head);
			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
			break;
		}
	}
	BUG_ON(!found);

	list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			if (atomic_add_negative(-1, &entry->refcount)) {
				list_del(&entry->head);
				BUG_ON(entry->locked);
				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
			}
			return;
		}
	}
	BUG_ON(1);
}
int drm_bo_lock_kmm(struct drm_buffer_object *bo)
{
	p_mm_entry_t *entry;
	int lock_ok = 1;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(entry->locked);
		if (!down_write_trylock(&entry->mm->mmap_sem)) {
			lock_ok = 0;
			break;
		}
		entry->locked = 1;
	}

	if (lock_ok)
		return 0;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (!entry->locked)
			break;
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}

	/*
	 * Possible deadlock. Try again. Our callers should handle this
	 * and retry.
	 */

	return -EAGAIN;
}
void drm_bo_unlock_kmm(struct drm_buffer_object *bo)
{
	p_mm_entry_t *entry;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(!entry->locked);
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}
}
int drm_bo_remap_bound(struct drm_buffer_object *bo)
{
	vma_entry_t *v_entry;
	int ret = 0;

	if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
		list_for_each_entry(v_entry, &bo->vma_list, head) {
			ret = drm_bo_map_bound(v_entry->vma);
			if (ret)
				break;
		}
	}

	return ret;
}
void drm_bo_finish_unmap(struct drm_buffer_object *bo)
{
	vma_entry_t *v_entry;

	list_for_each_entry(v_entry, &bo->vma_list, head) {
		v_entry->vma->vm_flags &= ~VM_PFNMAP;
	}
}

#endif
#ifdef DRM_IDR_COMPAT_FN
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr. The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function. It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
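/*
 * Example (sketch, with a hypothetical callback): freeing every object
 * stored in an idr with idr_for_each():
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 *
 * Returning non-zero from the callback stops the iteration and is passed
 * back to the caller.
 */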
/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max && !error) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p) {
				memset(p, 0, sizeof *p);
				free_layer(idp, p);
			}
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->top = NULL;
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
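/*
 * Example (sketch): the typical clean-up sequence described above, assuming
 * the free_one() callback from the idr_for_each() example:
 *
 *	idr_for_each(&my_idr, free_one, NULL);	(free the stored objects)
 *	idr_remove_all(&my_idr);		(drop all id mappings)
 *	idr_destroy(&my_idr);			(free the cached idr_layers)
 */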