/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These have bad performance in the AGP module for the indicated kernel versions.
 */

int drm_map_page_into_agp(struct page *page)
{
	int i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
	int i = change_page_attr(page, 1, PAGE_KERNEL);
	/* Caller's responsibility to call global_flush_tlb() for
	 * performance reasons */
	return i;
}
#endif
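
/*
 * Illustrative sketch (not built): the intended calling pattern for the two
 * helpers above is to change the attributes of a whole batch of pages and
 * then issue a single global_flush_tlb(), rather than flushing per page.
 * The function and parameter names here are hypothetical.
 */
#if 0
static void example_map_agp_pages(struct page **pages, unsigned long num_pages)
{
	unsigned long i;

	for (i = 0; i < num_pages; ++i)
		drm_map_page_into_agp(pages[i]);

	/* One flush for the whole batch. */
	global_flush_tlb();
}
#endif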

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
	static pgprot_t drm_protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};
	return drm_protection_map[vm_flags & 0x0F];
#else
	extern pgprot_t protection_map[];
	return protection_map[vm_flags & 0x0F];
#endif
}
#endif
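
/*
 * Illustrative sketch (not built): vm_get_page_prot() is typically used to
 * derive a vma's page protection from its vm_flags when setting up a
 * mapping. The helper name below is hypothetical.
 */
#if 0
static void example_init_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif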

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * vm code for kernels below 2.6.15, in which version a major vm rewrite
 * occurred. This implements a simple, straightforward version similar to
 * what's going to be in kernel 2.6.19+.
 */
static int drm_pte_is_clear(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 1;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto unlock;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		goto unlock;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		goto unlock;
	pte = pte_offset_map(pmd, addr);
	if (!pte)
		goto unlock;
	ret = pte_none(*pte);
	pte_unmap(pte);
unlock:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
		  unsigned long pfn, pgprot_t pgprot)
{
	if (!drm_pte_is_clear(vma, addr))
		return -EBUSY;

	return io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
}
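
/*
 * Illustrative sketch (not built): a nopage/fault handler would call
 * vm_insert_pfn() once per faulting address; -EBUSY only means the pte was
 * already populated by a racing fault and can usually be ignored. Names
 * other than vm_insert_pfn() and vm_get_page_prot() are hypothetical.
 */
#if 0
static int example_fault_one_pfn(struct vm_area_struct *vma,
				 unsigned long address, unsigned long pfn)
{
	int err = vm_insert_pfn(vma, address & PAGE_MASK, pfn,
				vm_get_page_prot(vma->vm_flags));

	return (err == -EBUSY) ? 0 : err;
}
#endif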

static struct {
	spinlock_t lock;
	struct page *dummy_page;
	atomic_t present;
} drm_np_retry = {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};

struct page *get_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 0) {
		struct page *page = alloc_page(GFP_KERNEL);
		if (!page)
			return NOPAGE_OOM;
		spin_lock(&drm_np_retry.lock);
		drm_np_retry.dummy_page = page;
		atomic_set(&drm_np_retry.present, 1);
		spin_unlock(&drm_np_retry.lock);
	}
	get_page(drm_np_retry.dummy_page);
	return drm_np_retry.dummy_page;
}

void free_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 1) {
		spin_lock(&drm_np_retry.lock);
		__free_page(drm_np_retry.dummy_page);
		drm_np_retry.dummy_page = NULL;
		atomic_set(&drm_np_retry.present, 0);
		spin_unlock(&drm_np_retry.lock);
	}
}
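
/*
 * Illustrative sketch (not built): on kernels without NOPAGE_REFAULT, a
 * nopage handler that just wants the fault retried can return the shared
 * dummy page from get_nopage_retry() instead of a real page. The handler
 * name below is hypothetical.
 */
#if 0
static struct page *example_nopage_refault(struct vm_area_struct *vma,
					   unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return get_nopage_retry();
}
#endif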

struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address, int *type)
{
	struct fault_data data;

	if (type)
		*type = VM_FAULT_MINOR;

	data.address = address;
	data.vma = vma;
	drm_bo_vm_fault(vma, &data);

	switch (data.type) {
	case VM_FAULT_OOM:
		return NOPAGE_OOM;
	case VM_FAULT_SIGBUS:
		return NOPAGE_SIGBUS;
	default:
		break;
	}
	return NOPAGE_REFAULT;
}
#endif

#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is
 * to first take the dev->struct_mutex, and then trylock all mmap_sems. If this
 * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
 * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
 * See the illustrative locking sketch after drm_bo_unlock_kmm() below.
 */
typedef struct p_mm_entry {
	struct list_head head;
	struct mm_struct *mm;
	atomic_t refcount;
	int locked;
} p_mm_entry_t;

typedef struct vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
} vma_entry_t;

struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			      unsigned long address, int *type)
{
	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
	drm_device_t *dev = bo->dev;
	drm_ttm_t *ttm;
	unsigned long page_offset;
	struct page *page;

	mutex_lock(&bo->mutex);
	if (type)
		*type = VM_FAULT_MINOR;

	if (address > vma->vm_end) {
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}
	if (drm_mem_reg_is_pci(dev, &bo->mem)) {
		DRM_ERROR("Invalid compat nopage.\n");
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	ttm = bo->ttm;
	drm_ttm_fixup_caching(ttm);
	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	page = drm_ttm_get_page(ttm, page_offset);
	if (!page) {
		page = NOPAGE_OOM;
		goto out_unlock;
	}
	get_page(page);
out_unlock:
	mutex_unlock(&bo->mutex);
	return page;
}

int drm_bo_map_bound(struct vm_area_struct *vma)
{
	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
	int ret = 0;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	BUG_ON(ret);

	if (bus_size) {
		unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
		pgprot_t pgprot = drm_io_prot(_DRM_AGP, vma);
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
					 vma->vm_end - vma->vm_start,
					 pgprot);
	}

	return ret;
}

int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n_entry;
	vma_entry_t *v_entry;
	struct mm_struct *mm = vma->vm_mm;

	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
	if (!v_entry) {
		DRM_ERROR("Allocation of vma pointer entry failed\n");
		return -ENOMEM;
	}
	v_entry->vma = vma;

	list_add_tail(&v_entry->head, &bo->vma_list);

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			atomic_inc(&entry->refcount);
			return 0;
		} else if ((unsigned long)mm < (unsigned long)entry->mm)
			break;
	}

	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
	if (!n_entry) {
		DRM_ERROR("Allocation of process mm pointer entry failed\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&n_entry->head);
	n_entry->mm = mm;
	n_entry->locked = 0;
	atomic_set(&n_entry->refcount, 0);
	list_add_tail(&n_entry->head, &entry->head);

	return 0;
}

void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n;
	vma_entry_t *v_entry, *v_n;
	int found = 0;
	struct mm_struct *mm = vma->vm_mm;

	list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
		if (v_entry->vma == vma) {
			found = 1;
			list_del(&v_entry->head);
			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
			break;
		}
	}
	BUG_ON(!found);

	list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			if (atomic_add_negative(-1, &entry->refcount)) {
				list_del(&entry->head);
				BUG_ON(entry->locked);
				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
			}
			return;
		}
	}
	BUG_ON(1);
}

int drm_bo_lock_kmm(drm_buffer_object_t * bo)
{
	p_mm_entry_t *entry;
	int lock_ok = 1;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(entry->locked);
		if (!down_write_trylock(&entry->mm->mmap_sem)) {
			lock_ok = 0;
			break;
		}
		entry->locked = 1;
	}
	if (lock_ok)
		return 0;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (!entry->locked)
			break;
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}

	/*
	 * Possible deadlock. Try again. Our callers should handle this
	 * and restart.
	 */

	return -EAGAIN;
}

void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
{
	p_mm_entry_t *entry;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(!entry->locked);
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}
}
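
/*
 * Illustrative sketch (not built) of the retry protocol described in the
 * comment at the top of this DRM_ODD_MM_COMPAT section: take
 * dev->struct_mutex, trylock all mmap_sems via drm_bo_lock_kmm(), and on
 * failure drop everything, release the cpu and retry. Everything except the
 * drm_bo_*_kmm() helpers is hypothetical.
 */
#if 0
static void example_lock_all_mappings(drm_device_t *dev, drm_buffer_object_t *bo)
{
	do {
		mutex_lock(&dev->struct_mutex);
		if (drm_bo_lock_kmm(bo) == 0)
			break;		/* all mmap_sems are now held */
		mutex_unlock(&dev->struct_mutex);
		schedule();		/* release the cpu, then retry */
	} while (1);

	/* ... safely modify the mappings here ... */

	drm_bo_unlock_kmm(bo);
	mutex_unlock(&dev->struct_mutex);
}
#endif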

int drm_bo_remap_bound(drm_buffer_object_t *bo)
{
	vma_entry_t *v_entry;
	int ret = 0;

	if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
		list_for_each_entry(v_entry, &bo->vma_list, head) {
			ret = drm_bo_map_bound(v_entry->vma);
			if (ret)
				break;
		}
	}
	return ret;
}

void drm_bo_finish_unmap(drm_buffer_object_t *bo)
{
	vma_entry_t *v_entry;

	list_for_each_entry(v_entry, &bo->vma_list, head) {
		v_entry->vma->vm_flags &= ~VM_PFNMAP;
	}
}

#endif