/**
 * \file drm_drv.c
 * Generic driver template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * To use this template, you must at least define the following (samples
 * given for the MGA driver):
 *
 * \code
 * #define DRIVER_AUTHOR        "VA Linux Systems, Inc."
 *
 * #define DRIVER_NAME          "mga"
 * #define DRIVER_DESC          "Matrox G200/G400"
 * #define DRIVER_DATE          "20001127"
 *
 * #define drm_x                mga_##x
 * \endcode
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm_core.h"

static void drm_cleanup(drm_device_t * dev);
int drm_fb_loaded = 0;

static int drm_version(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg);

/** Ioctl table */
static drm_ioctl_desc_t drm_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0},
        [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0},
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0},
        [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0},
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0},
        [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0},
        [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},

        [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH},

        [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH},

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},

        [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH},

        [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH},

        [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH},
        /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
        [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH},

        [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},

#if __OS_HAS_AGP
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
#endif

        [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},

        [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},

        [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},

        [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},

        [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_MM_TAKEDOWN)] = {drm_mm_takedown_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_MM_LOCK)] = {drm_mm_lock_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_MM_UNLOCK)] = {drm_mm_unlock_ioctl, DRM_AUTH},

        [DRM_IOCTL_NR(DRM_IOCTL_FENCE_CREATE)] = {drm_fence_create_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_FENCE_DESTROY)] = {drm_fence_destroy_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_FENCE_REFERENCE)] = {drm_fence_reference_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_FENCE_UNREFERENCE)] = {drm_fence_unreference_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_FENCE_SIGNALED)] = {drm_fence_signaled_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_FENCE_FLUSH)] = {drm_fence_flush_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_FENCE_WAIT)] = {drm_fence_wait_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_FENCE_EMIT)] = {drm_fence_emit_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_IOCTL_FENCE_BUFFERS)] = {drm_fence_buffers_ioctl, DRM_AUTH},
};

#define DRM_CORE_IOCTL_COUNT    ARRAY_SIZE( drm_ioctls )

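/*
 * Driver-private ioctls live in a table of the same shape, supplied through
 * dev->driver->ioctls and dev->driver->num_ioctls, and are dispatched by
 * drm_ioctl() below starting at ioctl number DRM_COMMAND_BASE.  A minimal
 * sketch, using hypothetical "foo" driver names:
 *
 * \code
 * static drm_ioctl_desc_t foo_ioctls[] = {
 *         [0] = {foo_flush_ioctl, DRM_AUTH},  // ioctl nr DRM_COMMAND_BASE + 0
 * };
 *
 * static struct drm_driver foo_driver = {
 *         .ioctls = foo_ioctls,
 *         .num_ioctls = ARRAY_SIZE(foo_ioctls),
 * };
 * \endcode
 */
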
/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
int drm_lastclose(drm_device_t * dev)
{
        drm_magic_entry_t *pt, *next;
        drm_map_list_t *r_list, *list_t;
        drm_vma_entry_t *vma, *vma_temp;
        int i;

        DRM_DEBUG("\n");

        /*
         * We can't do much about this function failing.
         */

        drm_bo_driver_finish(dev);

        if (dev->driver->lastclose)
                dev->driver->lastclose(dev);
        DRM_DEBUG("driver lastclose completed\n");

        if (dev->unique) {
                drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }

        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        /* Free drawable information memory */
        for (i = 0; i < dev->drw_bitfield_length / sizeof(*dev->drw_bitfield);
             i++) {
                drm_drawable_info_t *info = drm_get_drawable_info(dev, i);

                if (info) {
                        drm_free(info->rects, info->num_rects *
                                 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
                        drm_free(info, sizeof(*info), DRM_MEM_BUFS);
                }
        }

        mutex_lock(&dev->struct_mutex);
        del_timer(&dev->timer);

        if (dev->unique) {
                drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }

        if (dev->magicfree.next) {
                list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
                        list_del(&pt->head);
                        drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
                        drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
                }
                drm_ht_remove(&dev->magiclist);
        }

        /* Clear AGP information */
        if (drm_core_has_AGP(dev) && dev->agp) {
                drm_agp_mem_t *entry, *tempe;

                /* Remove AGP resources, but leave dev->agp
                   intact until drv_cleanup is called. */
                list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
                        if (entry->bound)
                                drm_unbind_agp(entry->memory);
                        drm_free_agp(entry->memory, entry->pages);
                        drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
                }
                INIT_LIST_HEAD(&dev->agp->memory);

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
        if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
                drm_sg_cleanup(dev->sg);
                dev->sg = NULL;
        }

        /* Clear vma list (only built for debugging) */
        list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
                list_del(&vma->head);
                drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
        }

        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                drm_rmmap_locked(dev, r_list->map);
                r_list = NULL;
        }

        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
                for (i = 0; i < dev->queue_count; i++) {

                        if (dev->queuelist[i]) {
                                drm_free(dev->queuelist[i],
                                         sizeof(*dev->queuelist[0]),
                                         DRM_MEM_QUEUES);
                                dev->queuelist[i] = NULL;
                        }
                }
                drm_free(dev->queuelist,
                         dev->queue_slots * sizeof(*dev->queuelist),
                         DRM_MEM_QUEUES);
                dev->queuelist = NULL;
        }
        dev->queue_count = 0;

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                drm_dma_takedown(dev);

        if (dev->lock.hw_lock) {
                dev->sigdata.lock = dev->lock.hw_lock = NULL;   /* SHM removed */
                dev->lock.filp = NULL;
                wake_up_interruptible(&dev->lock.lock_queue);
        }
        dev->dev_mapping = NULL;
        mutex_unlock(&dev->struct_mutex);

        DRM_DEBUG("lastclose completed\n");
        return 0;
}

void drm_cleanup_pci(struct pci_dev *pdev)
{
        drm_device_t *dev = pci_get_drvdata(pdev);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        if (dev)
                drm_cleanup(dev);
}
EXPORT_SYMBOL(drm_cleanup_pci);

/**
 * Module initialization. Called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported).
 *
 * \return zero on success or a negative number on failure.
 *
 * Initializes an array of drm_device structures, and attempts to
 * initialize all available devices, using consecutive minors, registering the
 * stubs and initializing the AGP device.
 *
 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
 * after the initialization for driver customization.
 */
int drm_init(struct drm_driver *driver,
             struct pci_device_id *pciidlist)
{
        struct pci_dev *pdev;
        struct pci_device_id *pid;
        int rc, i;

        DRM_DEBUG("\n");

        for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
                pid = &pciidlist[i];

                pdev = NULL;
                /* pass back in pdev to account for multiple identical cards */
                while ((pdev =
                        pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
                                       pid->subdevice, pdev))) {
                        /* is there already a driver loaded, or (short circuit saves work) */
                        /* does something like VesaFB have control of the memory region? */
                        if (pci_dev_driver(pdev)
                            || pci_request_regions(pdev, "DRM scan")) {
                                /* go into stealth mode */
                                drm_fb_loaded = 1;
                                pci_dev_put(pdev);
                                break;
                        }
                        /* no fbdev or vesadev, put things back and wait for normal probe */
                        pci_release_regions(pdev);
                }
        }

        if (!drm_fb_loaded)
                pci_register_driver(&driver->pci_driver);
        else {
                for (i = 0; pciidlist[i].vendor != 0; i++) {
                        pid = &pciidlist[i];

                        pdev = NULL;
                        /* pass back in pdev to account for multiple identical cards */
                        while ((pdev =
                                pci_get_subsys(pid->vendor, pid->device,
                                               pid->subvendor, pid->subdevice,
                                               pdev))) {
                                /* stealth mode requires a manual probe */
                                pci_dev_get(pdev);
                                if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
                                        pci_dev_put(pdev);
                                        return rc;
                                }
                        }
                }
                DRM_INFO("Used old pci detect: framebuffer loaded\n");
        }
        return 0;
}
EXPORT_SYMBOL(drm_init);

/**
 * Called via cleanup_module() at module unload time.
 *
 * Cleans up all DRM devices, calling drm_lastclose().
 *
 * \sa drm_init
 */
static void drm_cleanup(drm_device_t * dev)
{
        DRM_DEBUG("\n");
        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }

        drm_lastclose(dev);
        drm_fence_manager_takedown(dev);

        drm_ht_remove(&dev->map_hash);
        drm_mm_takedown(&dev->offset_manager);
        drm_ht_remove(&dev->object_hash);

        if (!drm_fb_loaded)
                pci_disable_device(dev->pdev);

        drm_ctxbitmap_cleanup(dev);

        if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
            && dev->agp->agp_mtrr >= 0) {
                int retval;
                retval = mtrr_del(dev->agp->agp_mtrr,
                                  dev->agp->agp_info.aper_base,
                                  dev->agp->agp_info.aper_size * 1024 * 1024);
                DRM_DEBUG("mtrr_del=%d\n", retval);
        }

        if (drm_core_has_AGP(dev) && dev->agp) {
                drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
                dev->agp = NULL;
        }
        if (dev->driver->unload)
                dev->driver->unload(dev);

        drm_put_head(&dev->primary);
        if (drm_put_dev(dev))
                DRM_ERROR("Cannot unload module\n");
}

void drm_exit(struct drm_driver *driver)
{
        int i;
        drm_device_t *dev = NULL;
        drm_head_t *head;

        DRM_DEBUG("\n");
        if (drm_fb_loaded) {
                for (i = 0; i < drm_cards_limit; i++) {
                        head = drm_heads[i];
                        if (!head)
                                continue;
                        if (!head->dev)
                                continue;
                        if (head->dev->driver != driver)
                                continue;
                        dev = head->dev;
                        if (dev) {
                                /* release the pci driver */
                                if (dev->pdev)
                                        pci_dev_put(dev->pdev);
                                drm_cleanup(dev);
                        }
                }
        } else
                pci_unregister_driver(&driver->pci_driver);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        free_nopage_retry();
#endif
        DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_exit);
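
/*
 * A driver wires drm_init()/drm_exit() into its own module init/exit hooks.
 * A minimal sketch, using hypothetical "foo" driver names:
 *
 * \code
 * static int __init foo_init(void)
 * {
 *         return drm_init(&foo_driver, foo_pciidlist);
 * }
 *
 * static void __exit foo_exit(void)
 * {
 *         drm_exit(&foo_driver);
 * }
 *
 * module_init(foo_init);
 * module_exit(foo_exit);
 * \endcode
 */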

/** File operations structure */
static const struct file_operations drm_stub_fops = {
        .owner = THIS_MODULE,
        .open = drm_stub_open
};

static int __init drm_core_init(void)
{
        int ret;
        struct sysinfo si;
        unsigned long avail_memctl_mem;
        unsigned long max_memctl_mem;

        si_meminfo(&si);

        /*
         * AGP only allows low / DMA32 memory ATM.
         */

        avail_memctl_mem = si.totalram - si.totalhigh;

        /*
         * Avoid overflows
         */

        max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
        max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;

        if (avail_memctl_mem >= max_memctl_mem)
                avail_memctl_mem = max_memctl_mem;

        drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit);

        ret = -ENOMEM;
        drm_cards_limit =
            (drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
        drm_heads = drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
        if (!drm_heads)
                goto err_p1;

        if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
                goto err_p1;

        drm_class = drm_sysfs_create(THIS_MODULE, "drm");
        if (IS_ERR(drm_class)) {
                printk(KERN_ERR "DRM: Error creating drm class.\n");
                ret = PTR_ERR(drm_class);
                goto err_p2;
        }

        drm_proc_root = proc_mkdir("dri", NULL);
        if (!drm_proc_root) {
                DRM_ERROR("Cannot create /proc/dri\n");
                ret = -1;
                goto err_p3;
        }

        drm_mem_init();

        DRM_INFO("Initialized %s %d.%d.%d %s\n",
                 CORE_NAME,
                 CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
        return 0;
err_p3:
        drm_sysfs_destroy(drm_class);
err_p2:
        unregister_chrdev(DRM_MAJOR, "drm");
        drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
err_p1:
        return ret;
}

static void __exit drm_core_exit(void)
{
        remove_proc_entry("dri", NULL);
        drm_sysfs_destroy(drm_class);

        unregister_chrdev(DRM_MAJOR, "drm");

        drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
}

module_init(drm_core_init);
module_exit(drm_core_exit);

/**
 * Get version information
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_version structure.
 * \return zero on success or negative number on failure.
 *
 * Fills in the version information in \p arg.
 */
static int drm_version(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_version_t __user *argp = (void __user *)arg;
        drm_version_t version;
        int len;

        if (copy_from_user(&version, argp, sizeof(version)))
                return -EFAULT;

        version.version_major = dev->driver->major;
        version.version_minor = dev->driver->minor;
        version.version_patchlevel = dev->driver->patchlevel;
        DRM_COPY(version.name, dev->driver->name);
        DRM_COPY(version.date, dev->driver->date);
        DRM_COPY(version.desc, dev->driver->desc);

        if (copy_to_user(argp, &version, sizeof(version)))
                return -EFAULT;
        return 0;
}

/**
 * Called whenever a process performs an ioctl on /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 *
 * Looks up the ioctl function in the ::ioctls table, checking for root
 * privileges if so required, and dispatches to the respective function.
 */
int drm_ioctl(struct inode *inode, struct file *filp,
              unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_ioctl_desc_t *ioctl;
        drm_ioctl_t *func;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        int retcode = -EINVAL;

        atomic_inc(&dev->ioctl_count);
        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++priv->ioctl_count;

        DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
                  current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),
                  priv->authenticated);

        if ((nr >= DRM_CORE_IOCTL_COUNT) &&
            ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
                goto err_i1;
        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
                && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
                ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
                ioctl = &drm_ioctls[nr];
        else
                goto err_i1;

        func = ioctl->func;
        /* is there a local override? */
        if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
                func = dev->driver->dma_ioctl;

        if (!func) {
                DRM_DEBUG("no function\n");
                retcode = -EINVAL;
        } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
                   ((ioctl->flags & DRM_AUTH) && !priv->authenticated) ||
                   ((ioctl->flags & DRM_MASTER) && !priv->master)) {
                retcode = -EACCES;
        } else {
                retcode = func(inode, filp, cmd, arg);
        }
err_i1:
        atomic_dec(&dev->ioctl_count);
        if (retcode)
                DRM_DEBUG("ret = %x\n", retcode);
        return retcode;
}
EXPORT_SYMBOL(drm_ioctl);

int drm_wait_on(drm_device_t *dev, wait_queue_head_t *queue, int timeout,
                int (*fn)(drm_device_t *dev, void *priv), void *priv)
{
        DECLARE_WAITQUEUE(entry, current);
        unsigned long end = jiffies + (timeout);
        int ret = 0;
        add_wait_queue(queue, &entry);

        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);
                if ((*fn)(dev, priv))
                        break;
                if (time_after_eq(jiffies, end)) {
                        ret = -EBUSY;
                        break;
                }
                schedule_timeout((HZ/100 > 1) ? HZ/100 : 1);
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(queue, &entry);
        return ret;
}
EXPORT_SYMBOL(drm_wait_on);

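/*
 * drm_wait_on() polls the given predicate until it returns non-zero, the
 * timeout (in jiffies) expires (-EBUSY), or a signal arrives (-EINTR).
 * A minimal sketch, using a hypothetical hardware-idle predicate:
 *
 * \code
 * static int foo_idle(drm_device_t *dev, void *priv)
 * {
 *         return FOO_READ_STATUS(dev) == 0;  // hypothetical register check
 * }
 *
 * ret = drm_wait_on(dev, &dev->lock.lock_queue, 3 * HZ, foo_idle, NULL);
 * \endcode
 */
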
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
        drm_map_list_t *entry;

        list_for_each_entry(entry, &dev->maplist, head) {
                if (entry->map && entry->map->type == _DRM_SHM &&
                    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
                        return entry->map;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(drm_getsarea);