Merged mga branch with trunk
[platform/upstream/libdrm.git] / linux / vm.c
1 /* vm.c -- Memory mapping for DRM -*- linux-c -*-
2  * Created: Mon Jan  4 08:58:31 1999 by faith@precisioninsight.com
3  * Revised: Mon Feb 14 00:16:45 2000 by kevin@precisioninsight.com
4  *
5  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  * 
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  * 
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25  * DEALINGS IN THE SOFTWARE.
26  * 
27  * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/vm.c,v 1.5 2000/02/23 04:47:31 martin Exp $
28  *
29  */
30
31 #define __NO_VERSION__
32 #include "drmP.h"
33
/* VM operations for direct (framebuffer/register/AGP) mappings: the
 * whole range is populated at mmap time by remap_page_range() in
 * drm_mmap(), so drm_vm_nopage() refuses any fault with SIGBUS. */
struct vm_operations_struct   drm_vm_ops = {
        nopage:  drm_vm_nopage,
        open:    drm_vm_open,
        close:   drm_vm_close,
};
39
/* VM operations for _DRM_SHM mappings (the shared hardware-lock page);
 * pages are faulted in on demand from dev->lock.hw_lock. */
struct vm_operations_struct   drm_vm_shm_ops = {
        nopage:  drm_vm_shm_nopage,
        open:    drm_vm_open,
        close:   drm_vm_close,
};
45
/* VM operations for DMA-buffer mappings (mmap offset 0); pages are
 * faulted in on demand from dma->pagelist. */
struct vm_operations_struct   drm_vm_dma_ops = {
        nopage:  drm_vm_dma_nopage,
        open:    drm_vm_open,
        close:   drm_vm_close,
};
51
/* Fault handler for direct framebuffer/register/AGP mappings.  The
 * entire range was mapped up front by remap_page_range() in drm_mmap(),
 * so any fault reaching this handler means the vma was moved or grown
 * (e.g. via mremap); refuse it with SIGBUS.
 *
 * Before kernel 2.3.23 a nopage handler returned the physical address
 * as an unsigned long; from 2.3.23 on it returns a struct page *, hence
 * the dual prototype.  NOPAGE_SIGBUS is presumably defined suitably for
 * both variants — confirm against drmP.h. */
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_nopage(struct vm_area_struct *vma,
                            unsigned long address,
                            int write_access)
#else
                                /* Return type changed in 2.3.23 */
struct page *drm_vm_nopage(struct vm_area_struct *vma,
                           unsigned long address,
                           int write_access)
#endif
{
        DRM_DEBUG("0x%08lx, %d\n", address, write_access);

        return NOPAGE_SIGBUS;           /* Disallow mremap */
}
67
/* Fault handler for _DRM_SHM mappings.  The only shared-memory object
 * served here is the hardware lock page (dev->lock.hw_lock): the fault
 * offset within the vma is resolved against that allocation.
 *
 * Returns the page backing `address', NOPAGE_SIGBUS if the address lies
 * beyond the vma (disallows mremap growth), or NOPAGE_OOM if the lock
 * page has not been allocated yet. */
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
                                unsigned long address,
                                int write_access)
#else
                                /* Return type changed in 2.3.23 */
struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int write_access)
#endif
{
        drm_file_t       *priv   = vma->vm_file->private_data;
        drm_device_t     *dev    = priv->dev;
        unsigned long    physical;
        unsigned long    offset;
        unsigned long    page;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dev->lock.hw_lock)    return NOPAGE_OOM;  /* Nothing allocated */

        offset   = address - vma->vm_start;
        page     = offset >> PAGE_SHIFT;
                                /* NOTE(review): hw_lock is treated as a
                                   direct-mapped kernel address so that
                                   MAP_NR() can find its mem_map entry —
                                   confirm it is not vmalloc'ed. */
        physical = (unsigned long)dev->lock.hw_lock + (offset & (~PAGE_MASK));
        atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */

        DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
        return physical;
#else
        return mem_map + MAP_NR(physical);
#endif
}
100
/* Fault handler for DMA-buffer mappings set up by drm_mmap_dma().  The
 * fault offset within the vma selects a page from dma->pagelist (the
 * vma's own offset is expected to be zero — see drm_mmap_dma()).
 *
 * Returns the backing page, NOPAGE_SIGBUS if the device has no DMA
 * state or the address lies beyond the vma, or NOPAGE_OOM if no pages
 * have been allocated. */
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
                                unsigned long address,
                                int write_access)
#else
                                /* Return type changed in 2.3.23 */
struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int write_access)
#endif
{
        drm_file_t       *priv   = vma->vm_file->private_data;
        drm_device_t     *dev    = priv->dev;
        drm_device_dma_t *dma    = dev->dma;
        unsigned long    physical;
        unsigned long    offset;
        unsigned long    page;

        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dma->pagelist)        return NOPAGE_OOM ; /* Nothing allocated */

        offset   = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
        page     = offset >> PAGE_SHIFT;
        physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
        atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */

        DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
        return physical;
#else
        return mem_map + MAP_NR(physical);
#endif
}
135
/* vm_operations open handler, also called directly from drm_mmap() and
 * drm_mmap_dma() after a successful mapping.  Bumps the per-device vma
 * count and the module use count.  When DRM_DEBUG_CODE is compiled in,
 * the vma is additionally recorded on dev->vmalist (under struct_sem)
 * for debugging; failure to allocate the list entry is silently
 * tolerated — only the debug bookkeeping is lost. */
void drm_vm_open(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
#if DRM_DEBUG_CODE
        drm_vma_entry_t *vma_entry;
#endif

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);
        MOD_INC_USE_COUNT;

#if DRM_DEBUG_CODE
        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma  = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid  = current->pid;
                dev->vmalist    = vma_entry;
                up(&dev->struct_sem);
        }
#endif
}
161
/* vm_operations close handler: undoes drm_vm_open().  Drops the module
 * use count and the per-device vma count, and when DRM_DEBUG_CODE is
 * compiled in, unlinks and frees this vma's entry from dev->vmalist
 * (under struct_sem).  A vma missing from the list — e.g. because its
 * debug entry failed to allocate in drm_vm_open() — is simply not
 * found and nothing is freed. */
void drm_vm_close(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->dev;
#if DRM_DEBUG_CODE
        drm_vma_entry_t *pt, *prev;
#endif

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        MOD_DEC_USE_COUNT;
        atomic_dec(&dev->vma_count);

#if DRM_DEBUG_CODE
        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
#endif
}
191
192 int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
193 {
194         drm_file_t       *priv   = filp->private_data;
195         drm_device_t     *dev    = priv->dev;
196         drm_device_dma_t *dma    = dev->dma;
197         unsigned long    length  = vma->vm_end - vma->vm_start;
198         
199         DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
200                   vma->vm_start, vma->vm_end, VM_OFFSET(vma));
201
202                                 /* Length must match exact page count */
203         if ((length >> PAGE_SHIFT) != dma->page_count) return -EINVAL;
204
205         vma->vm_ops   = &drm_vm_dma_ops;
206         vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
207         
208 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
209                                 /* In Linux 2.2.3 and above, this is
210                                    handled in do_mmap() in mm/mmap.c. */
211         ++filp->f_count;
212 #endif
213         vma->vm_file  =  filp;  /* Needed for drm_vm_open() */
214         drm_vm_open(vma);
215         return 0;
216 }
217
/* File-operation mmap entry point for the DRM device.  A zero mmap
 * offset maps the DMA buffers (delegated to drm_mmap_dma()); any other
 * offset selects the map in dev->maplist whose offset matches, and the
 * requested length must equal that map's size exactly.
 *
 * Returns 0 on success; -EINVAL for an unknown offset, size mismatch,
 * or unmappable map type; -EPERM for a restricted map without
 * CAP_SYS_ADMIN; -EAGAIN if remap_page_range() fails. */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_map_t       *map    = NULL;
        int             i;
        
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!VM_OFFSET(vma)) return drm_mmap_dma(filp, vma);

                                /* A sequential search of a linked list is
                                   fine here because: 1) there will only be
                                   about 5-10 entries in the list and, 2) a
                                   DRI client only has to do this mapping
                                   once, so it doesn't have to be optimized
                                   for performance, even if the list was a
                                   bit longer. */
        for (i = 0; i < dev->map_count; i++) {
                map = dev->maplist[i];
                if (map->offset == VM_OFFSET(vma)) break;
        }
        
        if (i >= dev->map_count) return -EINVAL;
        if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

                                /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
        

        switch (map->type) {
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
        case _DRM_AGP:
                                /* Physical memory beyond high_memory is
                                   device memory, not RAM. */
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__)
                                /* On 486+ (not AGP, which is cached by
                                   design here), set the cache-disable
                                   bit and clear write-through for the
                                   MMIO/framebuffer pages. */
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
#endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
                                /* Map the whole range up front; faults
                                   are then refused by drm_vm_nopage(). */
                if (remap_page_range(vma->vm_start,
                                     VM_OFFSET(vma),
                                     vma->vm_end - vma->vm_start,
                                     vma->vm_page_prot))
                                return -EAGAIN;
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_SHM:
                                /* Faulted in on demand by
                                   drm_vm_shm_nopage(). */
                vma->vm_ops = &drm_vm_shm_ops;
                                /* Don't let this area swap.  Change when
                                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_LOCKED;
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
        if (map->flags & _DRM_READ_ONLY) {
#if defined(__i386__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                                /* Ye gads this is ugly.  With more thought
                                   we could move this up higher and use
                                   `protection_map' instead.  */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        
#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
                                /* In Linux 2.2.3 and above, this is
                                   handled in do_mmap() in mm/mmap.c. */
        ++filp->f_count;
#endif
        vma->vm_file  =  filp;  /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}