1 /**************************************************************************
3 * This kernel module is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License as
5 * published by the Free Software Foundation; either version 2 of the
6 * License, or (at your option) any later version.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 **************************************************************************/
19 * This code provides access to unexported mm kernel features. It is necessary
20 * to use the new DRM memory manager code with kernels that don't support it
23 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
24 * Linux kernel mm subsystem authors.
25 * (Most code taken from there).
30 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
33 * These have bad performance in the AGP module for the indicated kernel versions.
/*
 * Switch the kernel mapping of @page to uncached (PAGE_KERNEL_NOCACHE)
 * so the page can safely be mapped into the AGP aperture.
 * Returns the result of change_page_attr().
 *
 * NOTE(review): parts of the body (locals, return) are not visible in
 * this excerpt.
 */
int drm_map_page_into_agp(struct page *page)
        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
/*
 * Restore the default cached kernel mapping (PAGE_KERNEL) of @page after
 * it has been unmapped from the AGP aperture; inverse of
 * drm_map_page_into_agp().  Returns the result of change_page_attr().
 *
 * NOTE(review): parts of the body (locals, return) are not visible in
 * this excerpt.
 */
int drm_unmap_page_from_agp(struct page *page)
        i = change_page_attr(page, 1, PAGE_KERNEL);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
56 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
59 * The protection map was exported in 2.6.19
/*
 * Compat replacement for vm_get_page_prot(), which the kernel only
 * exports from 2.6.19 on.  Translates the low four VM_{READ,WRITE,
 * EXEC,SHARED} bits of @vm_flags into a pgprot_t via the standard
 * private (__Pxxx) / shared (__Sxxx) protection tables.
 *
 * NOTE(review): two return paths are visible; they are presumably
 * separated by a hidden preprocessor branch choosing between a local
 * table and the kernel's own protection_map[] — confirm in full source.
 */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
        /* Private copy of the kernel's protection map, indexed by the
         * low 4 vm_flags bits. */
        static pgprot_t drm_protection_map[16] = {
                __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
        return drm_protection_map[vm_flags & 0x0F];
        /* Alternative path: use the (unexported but visible) kernel table. */
        extern pgprot_t protection_map[];
        return protection_map[vm_flags & 0x0F];
79 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 * vm code for kernels below 2.6.15, in which version a major vm rewrite
 * occurred. This implements a simple, straightforward
 * version similar to what's going to be
/*
 * Report whether the PTE for @addr in @vma's address space is empty
 * (pte_none), i.e. no mapping has been installed there yet.  Walks the
 * pgd -> pud -> pmd -> pte hierarchy under mm->page_table_lock.
 *
 * NOTE(review): the local declarations, the per-level presence checks,
 * the pte_unmap() and the final return are not visible in this excerpt.
 */
static int drm_pte_is_clear(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, addr);
        pud = pud_offset(pgd, addr);
        pmd = pmd_offset(pud, addr);
        pte = pte_offset_map(pmd, addr);
        ret = pte_none(*pte);   /* clear <=> nothing mapped at addr */
        spin_unlock(&mm->page_table_lock);
/*
 * Compat emulation of vm_insert_pfn() for kernels below 2.6.15: map a
 * single page frame @pfn at @addr with protection @pgprot, but only if
 * the PTE there is still clear (refuses to overwrite an existing
 * mapping).  Delegates the actual mapping to io_remap_pfn_range() for
 * one PAGE_SIZE.
 *
 * NOTE(review): the early-return on a populated PTE and the final
 * return of 'ret' are not visible in this excerpt.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                  unsigned long pfn, pgprot_t pgprot)
        if (!drm_pte_is_clear(vma, addr))
        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
132 struct page *dummy_page;
135 {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
/*
 * Return (with an extra reference) the shared dummy page used to make a
 * nopage handler refault, lazily allocating it on first use.  The
 * 'present' atomic flags whether drm_np_retry.dummy_page is valid;
 * publication of the new page happens under drm_np_retry.lock.
 *
 * NOTE(review): the alloc_page() failure path is not visible in this
 * excerpt — presumably it returns NOPAGE_OOM/NULL; confirm in full
 * source.
 */
struct page * get_nopage_retry(void)
        if (atomic_read(&drm_np_retry.present) == 0) {
                struct page *page = alloc_page(GFP_KERNEL);
                spin_lock(&drm_np_retry.lock);
                drm_np_retry.dummy_page = page;
                atomic_set(&drm_np_retry.present,1);
                spin_unlock(&drm_np_retry.lock);
        /* Extra reference for the caller; the cached page itself keeps
         * its own reference until free_nopage_retry(). */
        get_page(drm_np_retry.dummy_page);
        return drm_np_retry.dummy_page;
/*
 * Release the cached nopage-retry dummy page allocated by
 * get_nopage_retry(), if present.  Drops the cache's own reference
 * with __free_page() and clears the 'present' flag under
 * drm_np_retry.lock so a later get_nopage_retry() reallocates.
 */
void free_nopage_retry(void)
        if (atomic_read(&drm_np_retry.present) == 1) {
                spin_lock(&drm_np_retry.lock);
                __free_page(drm_np_retry.dummy_page);
                drm_np_retry.dummy_page = NULL;
                atomic_set(&drm_np_retry.present, 0);
                spin_unlock(&drm_np_retry.lock);
/*
 * Old-style nopage entry point that adapts to the newer fault-style
 * handler: packs the faulting address into a struct fault_data, calls
 * drm_vm_ttm_fault(), and translates the outcome back to nopage return
 * codes (NOPAGE_SIGBUS on VM_FAULT_SIGBUS, NOPAGE_REFAULT otherwise).
 *
 * NOTE(review): the switch statement the visible 'case' belongs to, and
 * other fault_data fields being filled in, are not visible in this
 * excerpt.
 */
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
        struct fault_data data;
        *type = VM_FAULT_MINOR;
        data.address = address;
        drm_vm_ttm_fault(vma, &data);
        case VM_FAULT_SIGBUS:
                return NOPAGE_SIGBUS;
        /* Anything non-fatal: ask the caller to retry the fault. */
        return NOPAGE_REFAULT;
189 #ifdef DRM_ODD_MM_COMPAT
192 * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
193 * workaround for a single BUG statement in do_no_page in these versions. The
194 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
 * first take the dev->struct_mutex, and then trylock all mmap_sems. If this
197 * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
198 * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
202 typedef struct p_mm_entry {
203 struct list_head head;
204 struct mm_struct *mm;
209 typedef struct vma_entry {
210 struct list_head head;
211 struct vm_area_struct *vma;
/*
 * nopage handler for TTM mappings on the DRM_ODD_MM_COMPAT kernels
 * (2.6.15 - ~2.6.19).  Finds — or lazily allocates — the TTM page
 * backing the faulting @address, under dev->struct_mutex.
 *
 * NOTE(review): several lines (locals such as 'ttm'/'page'/'dev',
 * allocation failure handling, the page refcount / return path) are not
 * visible in this excerpt.
 */
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
        drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
        unsigned long page_offset;
        drm_buffer_manager_t *bm;
        /*
         * FIXME: Check can't map aperture flag.
         */
        *type = VM_FAULT_MINOR;
        /* NOTE(review): vm_end is exclusive, so '>' lets a fault at
         * exactly vm_end through — '>=' looks intended; confirm. */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;
        /* For TTM maps, map->offset carries the ttm pointer itself. */
        ttm = (drm_ttm_t *) map->offset;
        mutex_lock(&dev->struct_mutex);
        drm_fixup_ttm_caching(ttm);
        BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
        page = ttm->pages[page_offset];
        /* Page not yet present: account it, then allocate it lazily. */
        if (drm_alloc_memctl(PAGE_SIZE)) {
        page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
        /* Undo the accounting (visible on the failure path). */
        drm_free_memctl(PAGE_SIZE);
        mutex_unlock(&dev->struct_mutex);
/*
 * (Re)establish the linear aperture mapping for @vma when its TTM is
 * bound uncached: computes the starting pfn from the ttm's aperture
 * offset plus the backend's aperture base, picks the io protection via
 * drm_io_prot(), and remaps the whole vma with io_remap_pfn_range().
 *
 * NOTE(review): the declaration/initialization of 'ret' and the final
 * return are not visible in this excerpt.
 */
int drm_ttm_map_bound(struct vm_area_struct *vma)
        drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
        drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
        if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
                /* First pfn of the ttm inside the device aperture. */
                unsigned long pfn = ttm->aper_offset +
                        (ttm->be->aperture_base >> PAGE_SHIFT);
                pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
                ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
                                         vma->vm_end - vma->vm_start,
/*
 * Register @vma as mapping @ttm: add a vma_entry_t to ttm->vma_list and
 * make sure the vma's mm is tracked in ttm->p_mm_list (refcounted, kept
 * sorted by mm pointer).  These lists are what drm_ttm_lock_mm() /
 * drm_ttm_remap_bound() later iterate over.
 *
 * NOTE(review): allocation-failure returns, the loop's break/return
 * statements, and the final return are not visible in this excerpt.
 */
int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
        p_mm_entry_t *entry, *n_entry;
        vma_entry_t *v_entry;
        drm_local_map_t *map = (drm_local_map_t *)
                vma->vm_private_data;
        struct mm_struct *mm = vma->vm_mm;
        v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
                DRM_ERROR("Allocation of vma pointer entry failed\n");
        map->handle = (void *) v_entry;
        list_add_tail(&v_entry->head, &ttm->vma_list);
        /* Find this mm in the (pointer-sorted) per-mm list. */
        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                if (mm == entry->mm) {
                        /* mm already tracked: just bump its refcount. */
                        atomic_inc(&entry->refcount);
                /* NOTE(review): the trailing ';' suggests an elided
                 * 'break' body at the sorted insertion point — confirm. */
                } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
        /* mm not found: allocate and insert a new tracking entry. */
        n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
                DRM_ERROR("Allocation of process mm pointer entry failed\n");
        INIT_LIST_HEAD(&n_entry->head);
        /* refcount 0 == one user; see atomic_add_negative(-1, ...) in
         * drm_ttm_delete_vma(). */
        atomic_set(&n_entry->refcount, 0);
        list_add_tail(&n_entry->head, &entry->head);
/*
 * Unregister @vma from @ttm: remove and free its vma_entry_t from
 * ttm->vma_list, then drop the refcount on the matching mm entry in
 * ttm->p_mm_list, freeing that entry when the count goes negative
 * (refcount 0 means one user — see drm_ttm_add_vma()).
 *
 * NOTE(review): loop break/return statements after the matches are not
 * visible in this excerpt.
 */
void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
        p_mm_entry_t *entry, *n;
        vma_entry_t *v_entry, *v_n;
        struct mm_struct *mm = vma->vm_mm;
        /* _safe variant: we delete the matching entry while iterating. */
        list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
                if (v_entry->vma == vma) {
                        list_del(&v_entry->head);
                        drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
        list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
                if (mm == entry->mm) {
                        /* Went below zero => last vma for this mm. */
                        if (atomic_add_negative(-1, &entry->refcount)) {
                                list_del(&entry->head);
                                /* Must never free while its mmap_sem is
                                 * held by drm_ttm_lock_mm(). */
                                BUG_ON(entry->locked);
                                drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
/*
 * Try to take mmap_sem (write mode) for every mm that maps @ttm, using
 * down_write_trylock() to avoid lock-order inversion against
 * dev->struct_mutex.  If any trylock fails, every sem already taken is
 * released again and the caller is expected to back off and retry.
 *
 * NOTE(review): locals, the 'locked' bookkeeping, the release-loop
 * bounds and the return values are not visible in this excerpt.
 */
int drm_ttm_lock_mm(drm_ttm_t * ttm)
        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                BUG_ON(entry->locked);
                if (!down_write_trylock(&entry->mm->mmap_sem)) {
        /* Trylock failed somewhere: undo the sems we did acquire. */
        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                up_write(&entry->mm->mmap_sem);
        /*
         * Possible deadlock. Try again. Our callers should handle this
/*
 * Release the write-mode mmap_sem of every mm mapping @ttm, previously
 * acquired by a successful drm_ttm_lock_mm().  BUG if any entry is not
 * marked locked.
 */
void drm_ttm_unlock_mm(drm_ttm_t * ttm)
        list_for_each_entry(entry, &ttm->p_mm_list, head) {
                BUG_ON(!entry->locked);
                up_write(&entry->mm->mmap_sem);
/*
 * Re-run drm_ttm_map_bound() on every vma registered for @ttm (after a
 * bind/caching change), then drop the mmap_sems via
 * drm_ttm_unlock_mm().  Caller must hold the sems (drm_ttm_lock_mm()).
 *
 * NOTE(review): the error handling inside the loop and the final return
 * of 'ret' are not visible in this excerpt.
 */
int drm_ttm_remap_bound(drm_ttm_t *ttm)
        vma_entry_t *v_entry;
        list_for_each_entry(v_entry, &ttm->vma_list, head) {
                ret = drm_ttm_map_bound(v_entry->vma);
        drm_ttm_unlock_mm(ttm);
/*
 * Finish unmapping an uncached ttm: clear VM_PFNMAP on every vma that
 * maps it, then release the mmap_sems via drm_ttm_unlock_mm().
 * No-op for cached ttms.  Caller must hold the sems
 * (drm_ttm_lock_mm()).
 *
 * NOTE(review): this function may continue past the end of the visible
 * excerpt.
 */
void drm_ttm_finish_unmap(drm_ttm_t *ttm)
        vma_entry_t *v_entry;
        if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
        list_for_each_entry(v_entry, &ttm->vma_list, head) {
                v_entry->vma->vm_flags &= ~VM_PFNMAP;
        drm_ttm_unlock_mm(ttm);