1 /* tdfx.c -- tdfx driver -*- linux-c -*-
2 * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
3 * Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com
5 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/tdfx_drv.c,v 1.3 2000/02/23 04:47:31 martin Exp $
/* Entry points exported for the module loader stubs below. */
34 EXPORT_SYMBOL(tdfx_init);
35 EXPORT_SYMBOL(tdfx_cleanup);
/* Driver identity reported to userspace via DRM_IOCTL_VERSION
 * (see tdfx_version() and the DRM_COPY macro below). */
37 #define TDFX_NAME "tdfx"
38 #define TDFX_DESC "tdfx"
39 #define TDFX_DATE "19991009"
42 #define TDFX_PATCHLEVEL 1
/* The single device instance; every handler below uses &tdfx_device. */
44 static drm_device_t tdfx_device;
/* Context owned by the resource holder (the X server); .handle is set to
 * -1 in tdfx_setup() until a context is allocated. */
45 drm_ctx_t tdfx_res_ctx;
/* File operations for the character device; uses GNU C labelled-field
 * initializers.  NOTE(review): the remaining members (open, ioctl, mmap,
 * ...) are on lines missing from this excerpt. */
47 static struct file_operations tdfx_fops = {
50 release: tdfx_release,
/* Registered as a misc character device; the minor number is assigned
 * dynamically and read back in tdfx_init() via tdfx_misc.minor. */
58 static struct miscdevice tdfx_misc = {
59 minor: MISC_DYNAMIC_MINOR,
/* Ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd) (see tdfx_ioctl()).
 * Each entry is { handler, auth_needed, root_only } — presumably in that
 * field order per drm_ioctl_desc_t; confirm against drmP.h.
 * FIX(review): the AGP_BIND entry previously dispatched drm_agp_unbind and
 * the AGP_UNBIND entry dispatched drm_agp_bind — the two handlers were
 * swapped; they now match their ioctl numbers. */
64 static drm_ioctl_desc_t tdfx_ioctls[] = {
65 [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { tdfx_version, 0, 0 },
66 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
67 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
68 [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
70 [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
71 [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
72 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
73 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
74 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
76 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { tdfx_addctx, 1, 1 },
77 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { tdfx_rmctx, 1, 1 },
78 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { tdfx_modctx, 1, 1 },
79 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { tdfx_getctx, 1, 0 },
80 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
81 [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { tdfx_newctx, 1, 1 },
82 [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { tdfx_resctx, 1, 0 },
83 [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
84 [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
85 [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 },
86 [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 },
87 [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
89 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire, 1, 1},
90 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release, 1, 1},
91 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable, 1, 1},
92 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info, 1, 1},
93 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1},
94 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1},
95 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind, 1, 1},
96 [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind, 1, 1},
/* Number of entries; nr values >= this are rejected in tdfx_ioctl(). */
99 #define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
102 int init_module(void);
103 void cleanup_module(void);
/* "tdfx=" insmod option string, handed to drm_parse_options() in
 * tdfx_init(). */
104 static char *tdfx = NULL;
106 MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
107 MODULE_DESCRIPTION("tdfx");
/* Declare the option above as a string module parameter (2.2/2.4 API). */
108 MODULE_PARM(tdfx, "s");
110 /* init_module is called when insmod is used to load the module */
/* NOTE(review): body missing from this excerpt — presumably forwards to
 * tdfx_init(); confirm against the full source. */
112 int init_module(void)
117 /* cleanup_module is called when rmmod is used to unload the module */
/* NOTE(review): body missing — presumably forwards to tdfx_cleanup(). */
119 void cleanup_module(void)
126 /* tdfx_setup is called by the kernel to parse command-line options passed
127 * via the boot-loader (e.g., LILO). It calls the insmod option routine,
130 * This is not currently supported, since it requires changes to
131 * linux/init/main.c. */
134 void __init tdfx_setup(char *str, int *ints)
/* NOTE(review): the conditional guarding these two statements (testing
 * `ints` for a malformed numeric command line) is on lines missing from
 * this excerpt. */
137 DRM_ERROR("Illegal command line format, ignored\n");
140 drm_parse_options(str);
/* Reset per-device bookkeeping to a clean state.  Called from tdfx_open()
 * when open_count goes 0 -> 1 (first opener).  Counterpart of
 * tdfx_takedown().  NOTE(review): several lines of the body are missing
 * from this excerpt (e.g. the closing brace of the hash loop). */
144 static int tdfx_setup(drm_device_t *dev)
/* Zero all usage/statistics counters. */
148 atomic_set(&dev->ioctl_count, 0);
149 atomic_set(&dev->vma_count, 0);
151 atomic_set(&dev->buf_alloc, 0);
153 atomic_set(&dev->total_open, 0);
154 atomic_set(&dev->total_close, 0);
155 atomic_set(&dev->total_ioctl, 0);
156 atomic_set(&dev->total_irq, 0);
157 atomic_set(&dev->total_ctx, 0);
158 atomic_set(&dev->total_locks, 0);
159 atomic_set(&dev->total_unlocks, 0);
160 atomic_set(&dev->total_contends, 0);
161 atomic_set(&dev->total_sleeps, 0);
/* Empty every bucket of the magic-cookie (authentication) hash table. */
163 for (i = 0; i < DRM_HASH_SIZE; i++) {
164 dev->magiclist[i].head = NULL;
165 dev->magiclist[i].tail = NULL;
/* No hardware lock and no DMA queues yet. */
170 dev->lock.hw_lock = NULL;
171 init_waitqueue_head(&dev->lock.lock_queue);
172 dev->queue_count = 0;
173 dev->queue_reserved = 0;
174 dev->queue_slots = 0;
175 dev->queuelist = NULL;
177 dev->context_flag = 0;
178 dev->interrupt_flag = 0;
181 dev->last_context = 0;
182 dev->last_switch = 0;
183 dev->last_checked = 0;
184 init_timer(&dev->timer);
185 init_waitqueue_head(&dev->context_wait);
/* Reset the dev->buf ring (DRM_BSZ bytes) used for buffered output:
 * read/write pointers coincide, i.e. the ring is empty. */
190 dev->buf_rp = dev->buf;
191 dev->buf_wp = dev->buf;
192 dev->buf_end = dev->buf + DRM_BSZ;
193 dev->buf_async = NULL;
194 init_waitqueue_head(&dev->buf_readers);
195 init_waitqueue_head(&dev->buf_writers);
/* No resource-owning context allocated yet. */
197 tdfx_res_ctx.handle=-1;
201 /* The kernel's context could be created here, but is now created
202 in drm_dma_enqueue. This is more resource-efficient for
203 hardware that does not do DMA, but may mean that
204 drm_select_queue fails between the time the interrupt is
205 initialized and the time the queues are initialized. */
/* Free everything tdfx_setup()/runtime allocated on the device: names,
 * magic-cookie lists, AGP memory, the vma list and the map list; finally
 * wake anyone sleeping on the hardware lock.  Called from tdfx_release()
 * when the last file handle closes.  NOTE(review): several body lines
 * (closing braces, some statements) are missing from this excerpt. */
211 static int tdfx_takedown(drm_device_t *dev)
214 drm_magic_entry_t *pt, *next;
216 drm_vma_entry_t *vma, *vma_next;
/* Serialise against anything else touching dev's structures. */
220 down(&dev->struct_sem);
221 del_timer(&dev->timer);
224 drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
229 drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
/* Free every entry in every bucket of the magic hash table. */
234 for (i = 0; i < DRM_HASH_SIZE; i++) {
235 for (pt = dev->magiclist[i].head; pt; pt = next) {
237 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
239 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
242 /* Clear AGP information */
245 drm_agp_mem_t *temp_next;
/* Walk the AGP allocation list, freeing backing pages then the nodes. */
247 temp = dev->agp->memory;
248 while(temp != NULL) {
249 temp_next = temp->next;
250 drm_free_agp(temp->memory, temp->pages);
251 drm_free(temp, sizeof(*temp), DRM_MEM_AGPLISTS);
/* Release the AGP backend if we had acquired it. */
254 if(dev->agp->acquired) (*drm_agp.release)();
255 drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
259 /* Clear vma list (only built for debugging) */
261 for (vma = dev->vmalist; vma; vma = vma_next) {
262 vma_next = vma->next;
263 drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
268 /* Clear map area and mtrr information */
270 for (i = 0; i < dev->map_count; i++) {
271 map = dev->maplist[i];
274 case _DRM_FRAME_BUFFER:
/* Remove the write-combining MTRR set up for the framebuffer. */
276 if (map->mtrr >= 0) {
278 retcode = mtrr_del(map->mtrr,
281 DRM_DEBUG("mtrr_del = %d\n", retcode);
284 drm_ioremapfree(map->handle, map->size);
287 drm_free_pages((unsigned long)map->handle,
293 /* Do nothing here, because this is all
294 handled in the AGP/GART driver. */
297 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
299 drm_free(dev->maplist,
300 dev->map_count * sizeof(*dev->maplist),
/* Drop the hardware lock (it lived in now-removed shared memory) and
 * wake any sleepers so they can notice the device is gone. */
306 if (dev->lock.hw_lock) {
307 dev->lock.hw_lock = NULL; /* SHM removed */
309 wake_up_interruptible(&dev->lock.lock_queue);
311 up(&dev->struct_sem);
316 /* tdfx_init is called via init_module at module load time, or via
317 * linux/init/main.c (this is not currently supported). */
/* NOTE(review): the signature line (presumably `int tdfx_init(void)`) is
 * missing from this excerpt.  Zeroes the device, registers the misc
 * device, initialises AGP and the context bitmap; error paths deregister
 * the misc device again. */
322 drm_device_t *dev = &tdfx_device;
326 memset((void *)dev, 0, sizeof(*dev));
327 dev->count_lock = SPIN_LOCK_UNLOCKED;
328 sema_init(&dev->struct_sem, 1);
/* Apply the "tdfx=" insmod option string, if any. */
331 drm_parse_options(tdfx);
334 if ((retcode = misc_register(&tdfx_misc))) {
335 DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME);
/* Record the device number from the dynamically assigned minor. */
338 dev->device = MKDEV(MISC_MAJOR, tdfx_misc.minor);
339 dev->name = TDFX_NAME;
344 dev->agp = drm_agp_init();
346 if((retcode = drm_ctxbitmap_init(dev))) {
347 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
/* Undo the misc_register() above before failing. */
349 misc_deregister(&tdfx_misc);
354 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
365 /* tdfx_cleanup is called via cleanup_module at module unload time. */
/* Deregisters the misc device and frees the context bitmap; further
 * teardown lines are missing from this excerpt. */
367 void tdfx_cleanup(void)
369 drm_device_t *dev = &tdfx_device;
374 if (misc_deregister(&tdfx_misc)) {
375 DRM_ERROR("Cannot unload module\n");
377 DRM_INFO("Module unloaded\n");
379 drm_ctxbitmap_cleanup(dev);
/* DRM_IOCTL_VERSION handler: fills the user's drm_version_t with the
 * driver's version numbers and the name/date/desc strings. */
383 int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
386 drm_version_t version;
/* Read the caller's struct first: it carries the buffer pointers and
 * capacities for the three strings. */
389 copy_from_user_ret(&version,
390 (drm_version_t *)arg,
/* Copy up to name##_len bytes of `value` into the user buffer `name`,
 * then report the string's full length back in name##_len so userspace
 * can detect truncation and retry with a bigger buffer.  Evaluates
 * `value` more than once (it is always a string literal here). */
394 #define DRM_COPY(name,value) \
395 len = strlen(value); \
396 if (len > name##_len) len = name##_len; \
397 name##_len = strlen(value); \
399 copy_to_user_ret(name, value, len, -EFAULT); \
402 version.version_major = TDFX_MAJOR;
403 version.version_minor = TDFX_MINOR;
404 version.version_patchlevel = TDFX_PATCHLEVEL;
406 DRM_COPY(version.name, TDFX_NAME);
407 DRM_COPY(version.date, TDFX_DATE);
408 DRM_COPY(version.desc, TDFX_DESC);
/* Write the updated struct (numbers and actual lengths) back. */
410 copy_to_user_ret((drm_version_t *)arg,
/* Open handler: delegates to drm_open_helper(); the first successful
 * open (open_count 0 -> 1) also runs tdfx_setup(dev).  count_lock
 * protects open_count, and is dropped before calling tdfx_setup(). */
417 int tdfx_open(struct inode *inode, struct file *filp)
419 drm_device_t *dev = &tdfx_device;
422 DRM_DEBUG("open_count = %d\n", dev->open_count);
423 if (!(retcode = drm_open_helper(inode, filp, dev))) {
425 atomic_inc(&dev->total_open);
426 spin_lock(&dev->count_lock);
427 if (!dev->open_count++) {
428 spin_unlock(&dev->count_lock);
429 return tdfx_setup(dev);
431 spin_unlock(&dev->count_lock);
/* Release handler: delegates to drm_release(); when the last handle
 * closes (open_count reaches 0) it runs tdfx_takedown(dev) — unless
 * ioctls are still in flight or the device is blocked, which is treated
 * as "device busy" (error path partially on missing lines). */
436 int tdfx_release(struct inode *inode, struct file *filp)
438 drm_file_t *priv = filp->private_data;
439 drm_device_t *dev = priv->dev;
442 DRM_DEBUG("open_count = %d\n", dev->open_count);
443 if (!(retcode = drm_release(inode, filp))) {
445 atomic_inc(&dev->total_close);
446 spin_lock(&dev->count_lock);
447 if (!--dev->open_count) {
448 if (atomic_read(&dev->ioctl_count) || dev->blocked) {
449 DRM_ERROR("Device busy: %d %d\n",
450 atomic_read(&dev->ioctl_count),
452 spin_unlock(&dev->count_lock);
/* Last close and nothing in flight: tear the device down. */
455 spin_unlock(&dev->count_lock);
456 return tdfx_takedown(dev);
458 spin_unlock(&dev->count_lock);
463 /* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
/* Dispatches through tdfx_ioctls[] after range, presence, root_only and
 * auth_needed checks.  ioctl_count counts in-flight ioctls so that
 * tdfx_release() can refuse to tear the device down while one runs. */
465 int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
468 int nr = DRM_IOCTL_NR(cmd);
469 drm_file_t *priv = filp->private_data;
470 drm_device_t *dev = priv->dev;
472 drm_ioctl_desc_t *ioctl;
475 atomic_inc(&dev->ioctl_count);
476 atomic_inc(&dev->total_ioctl);
479 DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
480 current->pid, cmd, nr, dev->device, priv->authenticated);
/* Reject ioctl numbers beyond the dispatch table. */
482 if (nr >= TDFX_IOCTL_COUNT) {
485 ioctl = &tdfx_ioctls[nr];
489 DRM_DEBUG("no function\n");
/* Permission checks: root-only ioctls need CAP_SYS_ADMIN; most others
 * need a magic-authenticated file handle. */
491 } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
492 || (ioctl->auth_needed && !priv->authenticated)) {
495 retcode = (func)(inode, filp, cmd, arg);
/* Balance the in-flight counter incremented on entry. */
499 atomic_dec(&dev->ioctl_count);
/* DRM_IOCTL_LOCK handler: take the hardware lock for lock.context,
 * sleeping interruptibly until it is free.  If the lock is acquired for
 * a context other than the last active one, a hardware context switch is
 * performed (while holding the lock).  The tail of the function tweaks
 * the caller's scheduler time slice as a 3dfx-specific fairness hack.
 * NOTE(review): many lines (braces, returns, some conditions) are
 * missing from this excerpt; comments below describe only what is
 * visible. */
503 int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
506 drm_file_t *priv = filp->private_data;
507 drm_device_t *dev = priv->dev;
508 DECLARE_WAITQUEUE(entry, current);
511 #if DRM_DMA_HISTOGRAM
/* Timestamp lock acquisition for the latency histogram. */
514 dev->lck_start = start = get_cycles();
517 copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
/* Userspace may never take the lock as the kernel context. */
519 if (lock.context == DRM_KERNEL_CONTEXT) {
520 DRM_ERROR("Process %d using kernel context %d\n",
521 current->pid, lock.context);
525 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
526 lock.context, current->pid, dev->lock.hw_lock->lock,
530 /* dev->queue_count == 0 right now for
532 if (lock.context < 0 || lock.context >= dev->queue_count)
/* If someone else holds the lock and we (the resource context) released
 * it less than DRM_LOCK_SLICE jiffies ago, back off for the rest of the
 * slice to avoid starving other contexts. */
538 if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
540 long j = jiffies - dev->lock.lock_time;
542 if (lock.context == tdfx_res_ctx.handle &&
543 j >= 0 && j < DRM_LOCK_SLICE) {
544 /* Can't take lock if we just had it and
545 there is contention. */
546 DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
547 lock.context, current->pid, j,
548 dev->lock.lock_time, jiffies);
549 current->state = TASK_INTERRUPTIBLE;
550 current->policy |= SCHED_YIELD;
551 schedule_timeout(DRM_LOCK_SLICE-j);
552 DRM_DEBUG("jiffies=%d\n", jiffies);
/* Sleep-retry loop: wait on lock_queue until drm_lock_take succeeds,
 * the device disappears, or a signal arrives. */
556 add_wait_queue(&dev->lock.lock_queue, &entry);
558 if (!dev->lock.hw_lock) {
559 /* Device has been unregistered */
563 if (drm_lock_take(&dev->lock.hw_lock->lock,
/* Got the lock: record owner pid and acquisition time. */
565 dev->lock.pid = current->pid;
566 dev->lock.lock_time = jiffies;
567 atomic_inc(&dev->total_locks);
568 break; /* Got lock */
/* Lock is contended: yield and sleep until woken. */
572 atomic_inc(&dev->total_sleeps);
573 current->state = TASK_INTERRUPTIBLE;
575 current->policy |= SCHED_YIELD;
578 if (signal_pending(current)) {
583 current->state = TASK_RUNNING;
584 remove_wait_queue(&dev->lock.lock_queue, &entry);
/* Lock is ours and a different context was active last: kick off a
 * hardware context switch (skipped when either side is the resource
 * context, which shares state with the X server). */
588 if (!ret && dev->last_context != lock.context &&
589 lock.context != tdfx_res_ctx.handle &&
590 dev->last_context != tdfx_res_ctx.handle) {
591 add_wait_queue(&dev->context_wait, &entry);
592 current->state = TASK_INTERRUPTIBLE;
593 /* PRE: dev->last_context != lock.context */
594 tdfx_context_switch(dev, dev->last_context, lock.context);
595 /* POST: we will wait for the context
596 switch and will dispatch on a later call
597 when dev->last_context == lock.context
598 NOTE WE HOLD THE LOCK THROUGHOUT THIS
600 current->policy |= SCHED_YIELD;
602 current->state = TASK_RUNNING;
603 remove_wait_queue(&dev->context_wait, &entry);
604 if (signal_pending(current)) {
606 } else if (dev->last_context != lock.context) {
607 DRM_ERROR("Context mismatch: %d %d\n",
608 dev->last_context, lock.context);
614 if (lock.flags & _DRM_LOCK_READY) {
615 /* Wait for space in DMA/FIFO */
617 if (lock.flags & _DRM_LOCK_QUIESCENT) {
618 /* Make hardware quiescent */
626 DRM_ERROR("pid = %5d, old counter = %5ld\n",
627 current->pid, current->counter);
/* Scheduler hack: non-resource contexts get a short, low-priority
 * slice; (per the else-branch) large remaining slices are halved. */
629 if (lock.context != tdfx_res_ctx.handle) {
630 current->counter = 5;
631 current->priority = DEF_PRIORITY/4;
634 while (current->counter > 25)
635 current->counter >>= 1; /* decrease time slice */
636 DRM_ERROR("pid = %5d, new counter = %5ld\n",
637 current->pid, current->counter);
639 DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
641 #if DRM_DMA_HISTOGRAM
642 atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
649 int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
652 drm_file_t *priv = filp->private_data;
653 drm_device_t *dev = priv->dev;
656 copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
658 if (lock.context == DRM_KERNEL_CONTEXT) {
659 DRM_ERROR("Process %d using kernel context %d\n",
660 current->pid, lock.context);
664 DRM_DEBUG("%d frees lock (%d holds)\n",
666 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
667 atomic_inc(&dev->total_unlocks);
668 if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
669 atomic_inc(&dev->total_contends);
670 drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
671 /* FIXME: Try to send data to card here */
672 if (!dev->context_flag) {
673 if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
674 DRM_KERNEL_CONTEXT)) {
680 current->policy |= SCHED_YIELD;
681 current->state = TASK_INTERRUPTIBLE;
682 schedule_timeout(1000);
685 if (lock.context != tdfx_res_ctx.handle) {
686 current->counter = 5;
687 current->priority = DEF_PRIORITY;
690 current->state = TASK_INTERRUPTIBLE;
691 schedule_timeout(10);