/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These have bad performance in the AGP module for the indicated kernel versions.
 */

int drm_map_page_into_agp(struct page *page)
{
	int i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
	int i = change_page_attr(page, 1, PAGE_KERNEL);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
#endif
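/*
 * Usage sketch (hypothetical caller, not part of this file): the point of
 * leaving the TLB flush to the caller is that attribute changes can be
 * batched and flushed once.
 *
 *	int i;
 *	for (i = 0; i < num_pages; ++i)
 *		drm_map_page_into_agp(pages[i]);
 *	global_flush_tlb();
 */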
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
	static pgprot_t drm_protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};

	return drm_protection_map[vm_flags & 0x0F];
#else
	extern pgprot_t protection_map[];
	return protection_map[vm_flags & 0x0F];
#endif
}
#endif
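/*
 * Typical use, as in the fault handling below: derive a vma's page
 * protection from its flags.
 *
 *	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 */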
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * vm code for kernels below 2.6.15, in which version a major vm rewrite
 * occurred. This implements a simple, straightforward version similar to
 * what's in kernel 2.6.19 and later.
 * Kernels below 2.6.15 use nopage, whereas 2.6.19 and upwards use nopfn.
 */

static struct {
	spinlock_t lock;
	struct page *dummy_page;
	atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
				    struct fault_data *data);
struct page *get_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 0) {
		struct page *page = alloc_page(GFP_KERNEL);
		if (!page)
			return NOPAGE_OOM;
		spin_lock(&drm_np_retry.lock);
		drm_np_retry.dummy_page = page;
		atomic_set(&drm_np_retry.present, 1);
		spin_unlock(&drm_np_retry.lock);
	}

	get_page(drm_np_retry.dummy_page);
	return drm_np_retry.dummy_page;
}
void free_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 1) {
		spin_lock(&drm_np_retry.lock);
		__free_page(drm_np_retry.dummy_page);
		drm_np_retry.dummy_page = NULL;
		atomic_set(&drm_np_retry.present, 0);
		spin_unlock(&drm_np_retry.lock);
	}
}
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address,
			      int *type)
{
	struct fault_data data;

	if (type)
		*type = VM_FAULT_MINOR;

	data.address = address;
	drm_bo_vm_fault(vma, &data);

	switch (data.type) {
	case VM_FAULT_OOM:
		return NOPAGE_OOM;
	case VM_FAULT_SIGBUS:
		return NOPAGE_SIGBUS;
	default:
		break;
	}

	return NOPAGE_REFAULT;
}
#endif
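/*
 * Hook-up sketch (hypothetical; the real wiring lives in the drm vm code):
 * which handler is installed depends on the fault API the kernel provides.
 *
 *	static struct vm_operations_struct drm_bo_vm_ops = {
 *	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 *		.nopage = drm_bo_vm_nopage,
 *	#else
 *		.nopfn = drm_bo_vm_nopfn,
 *	#endif
 *	};
 */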
#if !defined(DRM_FULL_MM_COMPAT) && \
    ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))

static int drm_pte_is_clear(struct vm_area_struct *vma,
			    unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 1;
	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte;

	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd)) goto unlock;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) goto unlock;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) goto unlock;
	pte = pte_offset_map(pmd, addr);
	ret = pte_none(*pte);
	pte_unmap(pte);
unlock:
	spin_unlock(&mm->page_table_lock);
	return ret;
}
static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			 unsigned long pfn)
{
	if (!drm_pte_is_clear(vma, addr))
		return -EBUSY;

	return io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
}
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
				    struct fault_data *data)
{
	unsigned long address = data->address;
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page = NULL;
	struct drm_ttm *ttm;
	struct drm_device *dev;
	unsigned long pfn;
	int err;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	dev = bo->dev;
	drm_bo_read_lock(&dev->bm.bm_lock, 0);

	mutex_lock(&bo->mutex);

	err = drm_bo_wait(bo, 0, 1, 0);
	if (err) {
		data->type = (err == -EAGAIN) ?
			VM_FAULT_MINOR : VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * If the buffer happens to be in a non-mappable location,
	 * move it to a mappable one.
	 */

	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
		unsigned long _end = jiffies + 3*DRM_HZ;
		uint32_t new_mask = bo->mem.proposed_flags |
			DRM_BO_FLAG_MAPPABLE |
			DRM_BO_FLAG_FORCE_MAPPABLE;

		do {
			err = drm_bo_move_buffer(bo, new_mask, 0, 0);
		} while ((err == -EAGAIN) && !time_after_eq(jiffies, _end));

		if (err) {
			DRM_ERROR("Timeout moving buffer to mappable location.\n");
			data->type = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	if (address > vma->vm_end) {
		data->type = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);
	if (err) {
		data->type = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

	if (bus_size) {
		struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];

		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
	} else {
		ttm = bo->ttm;

		drm_ttm_fixup_caching(ttm);
		page = drm_ttm_get_page(ttm, page_offset);
		if (!page) {
			data->type = VM_FAULT_OOM;
			goto out_unlock;
		}
		pfn = page_to_pfn(page);
		vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
			vm_get_page_prot(vma->vm_flags) :
			drm_io_prot(_DRM_TTM, vma);
	}

	err = vm_insert_pfn(vma, address, pfn);
	if (!err || err == -EBUSY)
		data->type = VM_FAULT_MINOR;
	else
		data->type = VM_FAULT_OOM;

out_unlock:
	mutex_unlock(&bo->mutex);
	drm_bo_read_unlock(&dev->bm.bm_lock);
	return NULL;
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
    !defined(DRM_FULL_MM_COMPAT)

unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
			      unsigned long address)
{
	struct fault_data data;

	data.address = address;

	(void) drm_bo_vm_fault(vma, &data);
	if (data.type == VM_FAULT_OOM)
		return NOPFN_OOM;
	else if (data.type == VM_FAULT_SIGBUS)
		return NOPFN_SIGBUS;

	/*
	 * pfn already set.
	 */

	return 0;
}
#endif
#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
 * is to first take the dev->struct_mutex, and then trylock all mmap_sems. If
 * this fails for a single mmap_sem, we have to release all sems and the
 * dev->struct_mutex, release the cpu and retry. We also need to keep track of
 * all vmas mapping the ttm.
 */
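/*
 * Caller sketch (hypothetical) of the retry protocol described above:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	while (drm_bo_lock_kmm(bo) == -EAGAIN) {
 *		mutex_unlock(&dev->struct_mutex);
 *		schedule();
 *		mutex_lock(&dev->struct_mutex);
 *	}
 *	... unmap / move the buffer ...
 *	drm_bo_unlock_kmm(bo);
 *	mutex_unlock(&dev->struct_mutex);
 */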
typedef struct p_mm_entry {
	struct list_head head;
	struct mm_struct *mm;
	atomic_t refcount;
	int locked;
} p_mm_entry_t;

typedef struct vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
} vma_entry_t;
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address,
			      int *type)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page;
	struct drm_ttm *ttm;
	struct drm_device *dev;

	mutex_lock(&bo->mutex);

	if (type)
		*type = VM_FAULT_MINOR;

	if (address > vma->vm_end) {
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	dev = bo->dev;
	if (drm_mem_reg_is_pci(dev, &bo->mem)) {
		DRM_ERROR("Invalid compat nopage.\n");
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	ttm = bo->ttm;
	drm_ttm_fixup_caching(ttm);
	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	page = drm_ttm_get_page(ttm, page_offset);
	if (!page) {
		page = NOPAGE_OOM;
		goto out_unlock;
	}

	get_page(page);
out_unlock:
	mutex_unlock(&bo->mutex);
	return page;
}
int drm_bo_map_bound(struct vm_area_struct *vma)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	int ret = 0;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	BUG_ON(ret);

	if (bus_size) {
		struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
		unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
		pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);

		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
					 vma->vm_end - vma->vm_start,
					 pgprot);
	}

	return ret;
}
int drm_bo_add_vma(struct drm_buffer_object *bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n_entry;
	vma_entry_t *v_entry;
	struct mm_struct *mm = vma->vm_mm;

	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
	if (!v_entry) {
		DRM_ERROR("Allocation of vma pointer entry failed\n");
		return -ENOMEM;
	}
	v_entry->vma = vma;

	list_add_tail(&v_entry->head, &bo->vma_list);

	/* Keep the mm list sorted by mm pointer. */
	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			atomic_inc(&entry->refcount);
			return 0;
		} else if ((unsigned long)mm < (unsigned long)entry->mm)
			break;
	}

	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
	if (!n_entry) {
		DRM_ERROR("Allocation of process mm pointer entry failed\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&n_entry->head);
	n_entry->mm = mm;
	n_entry->locked = 0;
	atomic_set(&n_entry->refcount, 0);
	list_add_tail(&n_entry->head, &entry->head);

	return 0;
}
void drm_bo_delete_vma(struct drm_buffer_object *bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n;
	vma_entry_t *v_entry, *v_n;
	int found = 0;
	struct mm_struct *mm = vma->vm_mm;

	list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
		if (v_entry->vma == vma) {
			found = 1;
			list_del(&v_entry->head);
			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
			break;
		}
	}
	BUG_ON(!found);

	list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			if (atomic_add_negative(-1, &entry->refcount)) {
				list_del(&entry->head);
				BUG_ON(entry->locked);
				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
			}
			return;
		}
	}
	BUG_ON(1);
}
int drm_bo_lock_kmm(struct drm_buffer_object *bo)
{
	p_mm_entry_t *entry;
	int lock_ok = 1;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(entry->locked);
		if (!down_write_trylock(&entry->mm->mmap_sem)) {
			lock_ok = 0;
			break;
		}
		entry->locked = 1;
	}

	if (lock_ok)
		return 0;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (!entry->locked)
			break;
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}

	/*
	 * Possible deadlock. Try again. Our callers should handle this
	 * and restart the locking attempt.
	 */

	return -EAGAIN;
}

void drm_bo_unlock_kmm(struct drm_buffer_object *bo)
{
	p_mm_entry_t *entry;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(!entry->locked);
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}
}
int drm_bo_remap_bound(struct drm_buffer_object *bo)
{
	vma_entry_t *v_entry;
	int ret = 0;

	if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
		list_for_each_entry(v_entry, &bo->vma_list, head) {
			ret = drm_bo_map_bound(v_entry->vma);
			if (ret)
				break;
		}
	}

	return ret;
}
void drm_bo_finish_unmap(struct drm_buffer_object *bo)
{
	vma_entry_t *v_entry;

	list_for_each_entry(v_entry, &bo->vma_list, head) {
		v_entry->vma->vm_flags &= ~VM_PFNMAP;
	}
}
#endif

#ifdef DRM_IDR_COMPAT_FN
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr. The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function. It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
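/*
 * Usage sketch (hypothetical): free every object stored in an idr, using
 * the iteration contract documented above.
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 */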
/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p) {
				memset(p, 0, sizeof *p);
				free_layer(idp, p);
			}
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->top = NULL;
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
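/*
 * The typical clean-up sequence from the comment above, spelled out
 * (my_idr and free_one are hypothetical):
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */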
#endif /* DRM_IDR_COMPAT_FN */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return (void *)old_p;
}
EXPORT_SYMBOL(idr_replace);
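/*
 * Usage sketch (hypothetical): swap the object stored under an id and
 * check for the documented error returns.
 *
 *	old = idr_replace(&my_idr, new_ptr, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 */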
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static __inline__ unsigned long __round_jiffies(unsigned long j, int cpu)
{
	int rem;
	unsigned long original = j;

	/* skew each cpu by 3 jiffies so they don't all fire at once */
	j += cpu * 3;
	rem = j % HZ;

	if (rem < HZ/4) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}

static __inline__ unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	return __round_jiffies(j + jiffies, cpu) - jiffies;
}

unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL(round_jiffies_relative);
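/*
 * Example: round_jiffies_relative() rounds a relative timeout so the
 * absolute expiry lands on a whole-second boundary (rounding down only
 * when the remainder is under HZ/4), letting timers batch their wakeups.
 * Hypothetical caller:
 *
 *	mod_timer(&my_timer, jiffies + round_jiffies_relative(2 * HZ));
 */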
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		if (pci_domain_nr(dev->bus) == 0 &&
		    (dev->bus->number == bus && dev->devfn == devfn))
			return dev;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_get_bus_and_slot);
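/*
 * Usage sketch (hypothetical): look up domain-0 device 00:02.0. The
 * returned device is referenced; drop it with pci_dev_put() when done.
 *
 *	struct pci_dev *pdev = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
 *	if (pdev) {
 *		...
 *		pci_dev_put(pdev);
 *	}
 */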
#endif

#if defined(DRM_KMAP_ATOMIC_PROT_PFN)
#define drm_kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
			   pgprot_t protection)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	static pte_t *km_pte;
	static int initialized = 0;

	if (unlikely(!initialized)) {
		km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
		initialized = 1;
	}

	pagefault_disable();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(km_pte-idx, pfn_pte(pfn, protection));

	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot_pfn);
#endif
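/*
 * Usage sketch (hypothetical): map a single pfn with an explicit
 * protection and copy a page into it; pair with kunmap_atomic() using
 * the same km_type slot.
 *
 *	void *va = kmap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_NOCACHE);
 *	memcpy(va, src, PAGE_SIZE);
 *	kunmap_atomic(va, KM_USER0);
 */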