/**
 * \file drm_drv.c
 * Generic driver template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * To use this template, you must at least define the following (samples
 * given for the MGA driver):
 *
 * \code
 * #define DRIVER_AUTHOR        "VA Linux Systems, Inc."
 *
 * #define DRIVER_NAME          "mga"
 * #define DRIVER_DESC          "Matrox G200/G400"
 * #define DRIVER_DATE          "20001127"
 *
 * #define drm_x                mga_##x
 * \endcode
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm_core.h"

static void drm_cleanup(struct drm_device * dev);
int drm_fb_loaded = 0;

static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);

/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
        /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
        DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

#if __OS_HAS_AGP
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif

        DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),

        DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),

        DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
                      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
                      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
                      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
                      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
};

#define DRM_CORE_IOCTL_COUNT    ARRAY_SIZE( drm_ioctls )

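/*
 * Illustrative only: a driver installs its own ioctls in a separate table,
 * indexed from DRM_COMMAND_BASE, and points dev->driver->ioctls and
 * dev->driver->num_ioctls at it.  The xyz_* names below are hypothetical
 * and only sketch the pattern; see a real driver for the actual tables.
 *
 * \code
 * struct drm_ioctl_desc xyz_ioctls[] = {
 *         DRM_IOCTL_DEF(DRM_XYZ_INIT, xyz_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 *         DRM_IOCTL_DEF(DRM_XYZ_FLUSH, xyz_dma_flush, DRM_AUTH),
 * };
 * int xyz_max_ioctl = ARRAY_SIZE(xyz_ioctls);
 * \endcode
 *
 * drm_unlocked_ioctl() routes command numbers in
 * [DRM_COMMAND_BASE, DRM_COMMAND_BASE + num_ioctls) to that table and
 * everything below DRM_CORE_IOCTL_COUNT to drm_ioctls[] above.
 */
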
/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
int drm_lastclose(struct drm_device * dev)
{
        struct drm_magic_entry *pt, *next;
        struct drm_map_list *r_list, *list_t;
        struct drm_vma_entry *vma, *vma_temp;
        int i;

        DRM_DEBUG("\n");

        /*
         * We can't do much about this function failing.
         */

        drm_bo_driver_finish(dev);

        if (dev->driver->lastclose)
                dev->driver->lastclose(dev);
        DRM_DEBUG("driver lastclose completed\n");

        if (dev->unique) {
                drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }

        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        /* Free drawable information memory */
        mutex_lock(&dev->struct_mutex);

        drm_drawable_free_all(dev);
        del_timer(&dev->timer);

        if (dev->magicfree.next) {
                list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
                        list_del(&pt->head);
                        drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
                        drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
                }
                drm_ht_remove(&dev->magiclist);
        }

        /* Clear AGP information */
        if (drm_core_has_AGP(dev) && dev->agp) {
                struct drm_agp_mem *entry, *tempe;

                /* Remove AGP resources, but leave dev->agp intact until
                   drm_cleanup() is called. */
                list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
                        if (entry->bound)
                                drm_unbind_agp(entry->memory);
                        drm_free_agp(entry->memory, entry->pages);
                        drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
                }
                INIT_LIST_HEAD(&dev->agp->memory);

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
        if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
                drm_sg_cleanup(dev->sg);
                dev->sg = NULL;
        }

        /* Clear vma list (only built for debugging) */
        list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
                list_del(&vma->head);
                drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
        }

        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                if (!(r_list->map->flags & _DRM_DRIVER)) {
                        drm_rmmap_locked(dev, r_list->map);
                        r_list = NULL;
                }
        }

        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
                for (i = 0; i < dev->queue_count; i++) {

                        if (dev->queuelist[i]) {
                                drm_free(dev->queuelist[i],
                                         sizeof(*dev->queuelist[0]),
                                         DRM_MEM_QUEUES);
                                dev->queuelist[i] = NULL;
                        }
                }
                drm_free(dev->queuelist,
                         dev->queue_slots * sizeof(*dev->queuelist),
                         DRM_MEM_QUEUES);
                dev->queuelist = NULL;
        }
        dev->queue_count = 0;

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                drm_dma_takedown(dev);

        if (dev->lock.hw_lock) {
                dev->sigdata.lock = dev->lock.hw_lock = NULL;   /* SHM removed */
                dev->lock.file_priv = NULL;
                wake_up_interruptible(&dev->lock.lock_queue);
        }
        dev->dev_mapping = NULL;
        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG("lastclose completed\n");
        return 0;
}

void drm_cleanup_pci(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        if (dev)
                drm_cleanup(dev);
}
EXPORT_SYMBOL(drm_cleanup_pci);

/**
 * Module initialization. Called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported).
 *
 * \return zero on success or a negative number on failure.
 *
 * Initializes an array of drm_device structures, and attempts to
 * initialize all available devices, using consecutive minors, registering the
 * stubs and initializing the AGP device.
 *
 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
 * after the initialization for driver customization.
 */
int drm_init(struct drm_driver *driver,
             struct pci_device_id *pciidlist)
{
        struct pci_dev *pdev;
        struct pci_device_id *pid;
        int rc, i;

        DRM_DEBUG("\n");

        for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
                pid = &pciidlist[i];

                pdev = NULL;
                /* pass back in pdev to account for multiple identical cards */
                while ((pdev =
                        pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
                                       pid->subdevice, pdev))) {
                        /* Are there device class requirements? */
                        if ((pid->class != 0)
                                && ((pdev->class & pid->class_mask) != pid->class)) {
                                continue;
                        }
                        /* Is a driver already bound, or (short-circuit saves work)
                           does something like vesafb have control of the memory region? */
                        if (pci_dev_driver(pdev)
                            || pci_request_regions(pdev, "DRM scan")) {
                                /* go into stealth mode */
                                drm_fb_loaded = 1;
                                pci_dev_put(pdev);
                                break;
                        }
                        /* no fbdev or vesadev, put things back and wait for normal probe */
                        pci_release_regions(pdev);
                }
        }

        if (!drm_fb_loaded)
                return pci_register_driver(&driver->pci_driver);
        else {
                for (i = 0; pciidlist[i].vendor != 0; i++) {
                        pid = &pciidlist[i];

                        pdev = NULL;
                        /* pass back in pdev to account for multiple identical cards */
                        while ((pdev =
                                pci_get_subsys(pid->vendor, pid->device,
                                               pid->subvendor, pid->subdevice,
                                               pdev))) {
                                /* Are there device class requirements? */
                                if ((pid->class != 0)
                                        && ((pdev->class & pid->class_mask) != pid->class)) {
                                        continue;
                                }
                                /* stealth mode requires a manual probe */
                                pci_dev_get(pdev);
                                if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
                                        pci_dev_put(pdev);
                                        return rc;
                                }
                        }
                }
                DRM_INFO("Used old pci detect: framebuffer loaded\n");
        }
        return 0;
}
EXPORT_SYMBOL(drm_init);

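/*
 * For illustration: a driver normally reaches drm_init() and drm_exit() from
 * its own module init/exit hooks.  A hypothetical "xyz" driver (names assumed,
 * not taken from this tree) would do roughly the following:
 *
 * \code
 * static struct drm_driver xyz_driver = { ... };
 * static struct pci_device_id xyz_pciidlist[] = { ... };
 *
 * static int __init xyz_init(void)
 * {
 *         return drm_init(&xyz_driver, xyz_pciidlist);
 * }
 *
 * static void __exit xyz_exit(void)
 * {
 *         drm_exit(&xyz_driver);
 * }
 *
 * module_init(xyz_init);
 * module_exit(xyz_exit);
 * \endcode
 */
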
/**
 * Called via cleanup_module() at module unload time.
 *
 * Cleans up a DRM device, calling drm_lastclose().
 *
 * \sa drm_init
 */
static void drm_cleanup(struct drm_device * dev)
{
        DRM_DEBUG("\n");
        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }

        drm_lastclose(dev);
        drm_fence_manager_takedown(dev);

        if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
            && dev->agp->agp_mtrr >= 0) {
                int retval;
                retval = mtrr_del(dev->agp->agp_mtrr,
                                  dev->agp->agp_info.aper_base,
                                  dev->agp->agp_info.aper_size * 1024 * 1024);
                DRM_DEBUG("mtrr_del=%d\n", retval);
        }

        if (drm_core_has_AGP(dev) && dev->agp) {
                drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
                dev->agp = NULL;
        }
        if (dev->driver->unload)
                dev->driver->unload(dev);

        if (!drm_fb_loaded)
                pci_disable_device(dev->pdev);

        drm_ctxbitmap_cleanup(dev);
        drm_ht_remove(&dev->map_hash);
        drm_mm_takedown(&dev->offset_manager);
        drm_ht_remove(&dev->object_hash);

        drm_put_head(&dev->primary);
        if (drm_put_dev(dev))
                DRM_ERROR("Cannot unload module\n");
}

void drm_exit(struct drm_driver *driver)
{
        int i;
        struct drm_device *dev = NULL;
        struct drm_head *head;

        DRM_DEBUG("\n");
        if (drm_fb_loaded) {
                for (i = 0; i < drm_cards_limit; i++) {
                        head = drm_heads[i];
                        if (!head)
                                continue;
                        if (!head->dev)
                                continue;
                        if (head->dev->driver != driver)
                                continue;
                        dev = head->dev;
                        if (dev) {
                                /* release the pci driver */
                                if (dev->pdev)
                                        pci_dev_put(dev->pdev);
                                drm_cleanup(dev);
                        }
                }
        } else
                pci_unregister_driver(&driver->pci_driver);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        free_nopage_retry();
#endif
        DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_exit);

/** File operations structure */
static const struct file_operations drm_stub_fops = {
        .owner = THIS_MODULE,
        .open = drm_stub_open
};

static int __init drm_core_init(void)
{
        int ret;
        struct sysinfo si;
        unsigned long avail_memctl_mem;
        unsigned long max_memctl_mem;

        si_meminfo(&si);

        /*
         * AGP only allows low / DMA32 memory ATM.
         */

        avail_memctl_mem = si.totalram - si.totalhigh;

        /*
         * Avoid overflows
         */

        max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
        max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;

        if (avail_memctl_mem >= max_memctl_mem)
                avail_memctl_mem = max_memctl_mem;

        drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit);

        ret = -ENOMEM;
        drm_cards_limit =
            (drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
        drm_heads = drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
        if (!drm_heads)
                goto err_p1;

        if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
                goto err_p1;

        drm_class = drm_sysfs_create(THIS_MODULE, "drm");
        if (IS_ERR(drm_class)) {
                printk(KERN_ERR "DRM: Error creating drm class.\n");
                ret = PTR_ERR(drm_class);
                goto err_p2;
        }

        drm_proc_root = proc_mkdir("dri", NULL);
        if (!drm_proc_root) {
                DRM_ERROR("Cannot create /proc/dri\n");
                ret = -1;
                goto err_p3;
        }

        drm_mem_init();

        DRM_INFO("Initialized %s %d.%d.%d %s\n",
                 CORE_NAME,
                 CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
        return 0;
err_p3:
        drm_sysfs_destroy();
err_p2:
        unregister_chrdev(DRM_MAJOR, "drm");
        drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
err_p1:
        return ret;
}

static void __exit drm_core_exit(void)
{
        remove_proc_entry("dri", NULL);
        drm_sysfs_destroy();

        unregister_chrdev(DRM_MAJOR, "drm");

        drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
}

module_init(drm_core_init);
module_exit(drm_core_exit);

/**
 * Get version information.
 *
 * \param dev DRM device.
 * \param data pointer to a drm_version structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Fills in the version information in \p data.
 */
static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_version *version = data;
        int len;

        version->version_major = dev->driver->major;
        version->version_minor = dev->driver->minor;
        version->version_patchlevel = dev->driver->patchlevel;
        DRM_COPY(version->name, dev->driver->name);
        DRM_COPY(version->date, dev->driver->date);
        DRM_COPY(version->desc, dev->driver->desc);

        return 0;
}

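/*
 * For reference, user space typically reaches drm_version() with two
 * DRM_IOCTL_VERSION calls, roughly what libdrm's drmGetVersion() does.
 * The sketch below is illustrative only, with error handling omitted;
 * note that the kernel copies at most *_len bytes and does not
 * NUL-terminate the strings.
 *
 * \code
 * struct drm_version v;
 *
 * memset(&v, 0, sizeof(v));
 * ioctl(fd, DRM_IOCTL_VERSION, &v);       // first pass: lengths only
 * v.name = malloc(v.name_len + 1);
 * v.date = malloc(v.date_len + 1);
 * v.desc = malloc(v.desc_len + 1);
 * ioctl(fd, DRM_IOCTL_VERSION, &v);       // second pass: copy the strings
 * \endcode
 */
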
/**
 * Called whenever a process performs an ioctl on /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or a negative number on failure.
 *
 * Looks up the ioctl function in the ::ioctls table, checking for root
 * privileges if so required, and dispatches to the respective function.
 *
 * Copies data in and out according to the size and direction given in cmd,
 * which must match the ioctl cmd known by the kernel.  The kernel uses a 512
 * byte stack buffer to store the ioctl arguments in kernel space.  Should we
 * ever need much larger ioctl arguments, we may need to allocate memory.
 */
int drm_ioctl(struct inode *inode, struct file *filp,
              unsigned int cmd, unsigned long arg)
{
        return drm_unlocked_ioctl(filp, cmd, arg);
}
EXPORT_SYMBOL(drm_ioctl);

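/*
 * Illustration of what the dispatcher below relies on: each DRM command is
 * built with the standard _IOC encoding, so the transfer direction and the
 * payload size can be read straight out of "cmd".  For example, drm.h
 * defines:
 *
 * \code
 * #define DRM_IOCTL_VERSION      DRM_IOWR(0x00, struct drm_version)
 * \endcode
 *
 * For that command both IOC_IN and IOC_OUT are set and _IOC_SIZE(cmd) equals
 * sizeof(struct drm_version), so drm_unlocked_ioctl() copies a struct
 * drm_version into kdata[] before calling the handler and copies it back to
 * user space afterwards.
 */
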
long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->head->dev;
        struct drm_ioctl_desc *ioctl;
        drm_ioctl_t *func;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        int retcode = -EINVAL;
        char kdata[512];

        atomic_inc(&dev->ioctl_count);
        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++file_priv->ioctl_count;

        DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
                  current->pid, cmd, nr, (long)old_encode_dev(file_priv->head->device),
                  file_priv->authenticated);

        if ((nr >= DRM_CORE_IOCTL_COUNT) &&
            ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
                goto err_i1;
        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
                && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
                ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
                ioctl = &drm_ioctls[nr];
        else {
                retcode = -EINVAL;
                goto err_i1;
        }
#if 0
        /*
         * This check is disabled, because driver private ioctl->cmd
         * are not the ioctl commands with size and direction bits but
         * just the indices. The DRM core ioctl->cmd are the proper ioctl
         * commands. The drivers' ioctl tables need to be fixed.
         */
        if (ioctl->cmd != cmd) {
                retcode = -EINVAL;
                goto err_i1;
        }
#endif
        func = ioctl->func;
        /* is there a local override? */
        if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
                func = dev->driver->dma_ioctl;

        if (cmd & IOC_IN) {
                if (copy_from_user(kdata, (void __user *)arg,
                                   _IOC_SIZE(cmd)) != 0) {
                        retcode = -EACCES;
                        goto err_i1;
                }
        }

        if (!func) {
                DRM_DEBUG("no function\n");
                retcode = -EINVAL;
        } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
                   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
                   ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
                retcode = -EACCES;
        } else {
                retcode = func(dev, kdata, file_priv);
        }

        if ((retcode == 0) && (cmd & IOC_OUT)) {
                if (copy_to_user((void __user *)arg, kdata,
                                 _IOC_SIZE(cmd)) != 0)
                        retcode = -EACCES;
        }

err_i1:
        atomic_dec(&dev->ioctl_count);
        if (retcode)
                DRM_DEBUG("ret = %d\n", retcode);
        return retcode;
}
EXPORT_SYMBOL(drm_unlocked_ioctl);

drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
        struct drm_map_list *entry;

        list_for_each_entry(entry, &dev->maplist, head) {
                if (entry->map && entry->map->type == _DRM_SHM &&
                    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
                        return entry->map;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(drm_getsarea);
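/*
 * Typical use, shown only as an illustration (the dev_priv/sarea field names
 * vary per driver): a driver looks up the SAREA once during DMA
 * initialization and bails out if it is missing.
 *
 * \code
 * dev_priv->sarea = drm_getsarea(dev);
 * if (!dev_priv->sarea) {
 *         DRM_ERROR("could not find sarea!\n");
 *         return -EINVAL;
 * }
 * \endcode
 */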