Big update:
[profile/ivi/libdrm.git] / linux-core / drm_compat.c
1 /**************************************************************************
2  * 
3  * This kernel module is free software; you can redistribute it and/or
4  * modify it under the terms of the GNU General Public License as
5  * published by the Free Software Foundation; either version 2 of the
6  * License, or (at your option) any later version.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16  * 
17  **************************************************************************/
18 /*
19  * This code provides access to unexported mm kernel features. It is necessary
20  * to use the new DRM memory manager code with kernels that don't support it
21  * directly.
22  *
23  * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
24  *          Linux kernel mm subsystem authors. 
25  *          (Most code taken from there).
26  */
27
28 #include "drmP.h"
29
30 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
31 int drm_map_page_into_agp(struct page *page)
32 {
33         int i;
34         i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
35         /* Caller's responsibility to call global_flush_tlb() for
36          * performance reasons */
37         return i;
38 }
39
40 int drm_unmap_page_from_agp(struct page *page)
41 {
42         int i;
43         i = change_page_attr(page, 1, PAGE_KERNEL);
44         /* Caller's responsibility to call global_flush_tlb() for
45          * performance reasons */
46         return i;
47 }
48 #endif
49
50
51 pgprot_t vm_get_page_prot(unsigned long vm_flags)
52 {
53 #ifdef MODULE
54         static pgprot_t drm_protection_map[16] = {
55                 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
56                 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
57         };
58
59         return drm_protection_map[vm_flags & 0x0F];
60 #else
61         extern pgprot_t protection_map[];
62         return protection_map[vm_flags & 0x0F];
63 #endif
64 };
65
66 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
67
/*
 * Check whether the page-table entry covering @addr in @vma's mm is
 * clear.  Returns 1 if no pte is populated there — including the case
 * where an intermediate level (pgd/pud/pmd) is absent — and 0 when a
 * pte already exists.
 *
 * Locking differs by kernel version: before 2.6.15 the whole walk runs
 * under mm->page_table_lock; from 2.6.15 on, the split pte lock is
 * taken via pte_offset_map_lock() and released with pte_unmap_unlock().
 */
static int drm_pte_is_clear(struct vm_area_struct *vma,
                            unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int ret = 1;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;
        

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        spin_lock(&mm->page_table_lock);
#else
        /* Split pte lock; filled in by pte_offset_map_lock() below. */
        spinlock_t *ptl;
#endif
        
        /* Top-down walk: a missing level means nothing is mapped here,
         * so fall through to "unlock" with ret still 1. */
        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto unlock;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                goto unlock;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                goto unlock;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        pte = pte_offset_map(pmd, addr);
#else 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 
#endif
        if (!pte)
                goto unlock;
        ret = pte_none(*pte);
        /* The "unlock" label sits in a different spot per version so the
         * early-exit gotos skip exactly the teardown that applies. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        pte_unmap(pte);
 unlock:        
        spin_unlock(&mm->page_table_lock);
#else
        pte_unmap_unlock(pte, ptl);
 unlock:
#endif
        return ret;
}
112         
113 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 
114                   unsigned long pfn, pgprot_t pgprot)
115 {
116         int ret;
117         if (!drm_pte_is_clear(vma, addr))
118                 return -EBUSY;
119
120         ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
121         return ret;
122 }
123
124
/*
 * Lazily-allocated dummy page shared by all nopage-retry users.
 * "present" flags whether dummy_page holds a real page; "lock"
 * serializes installation/teardown in get_nopage_retry() /
 * free_nopage_retry().
 *
 * NOTE(review): dummy_page is statically initialized to NOPAGE_OOM
 * rather than NULL — presumably so a premature use fails recognizably;
 * confirm against the nopage handlers before changing.
 */
static struct {
        spinlock_t lock;
        struct page *dummy_page;
        atomic_t present;
} drm_np_retry = 
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
131
132 struct page * get_nopage_retry(void)
133 {
134         if (atomic_read(&drm_np_retry.present) == 0) {
135                 struct page *page = alloc_page(GFP_KERNEL);
136                 if (!page)
137                         return NOPAGE_OOM;
138                 spin_lock(&drm_np_retry.lock);
139                 drm_np_retry.dummy_page = page;
140                 atomic_set(&drm_np_retry.present,1);
141                 spin_unlock(&drm_np_retry.lock);
142         }
143         get_page(drm_np_retry.dummy_page);
144         return drm_np_retry.dummy_page;
145 }
146
147 void free_nopage_retry(void)
148 {
149         if (atomic_read(&drm_np_retry.present) == 1) {
150                 spin_lock(&drm_np_retry.lock);
151                 __free_page(drm_np_retry.dummy_page);
152                 drm_np_retry.dummy_page = NULL;
153                 atomic_set(&drm_np_retry.present, 0);
154                 spin_unlock(&drm_np_retry.lock);
155         }
156 }
157 #endif
158
159 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
160
161 struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
162                                unsigned long address, 
163                                int *type)
164 {
165         struct fault_data data;
166
167         if (type)
168                 *type = VM_FAULT_MINOR;
169
170         data.address = address;
171         data.vma = vma;
172         drm_vm_ttm_fault(vma, &data);
173         switch (data.type) {
174         case VM_FAULT_OOM:
175                 return NOPAGE_OOM;
176         case VM_FAULT_SIGBUS:
177                 return NOPAGE_SIGBUS;
178         default:
179                 break;
180         }
181
182         return NOPAGE_REFAULT;
183 }
184
185 #endif