fb7a997b4b6d4acaf025c310b49276b05a8c1bfb
[platform/upstream/libdrm.git] / linux-core / tdfx_drv.c
1 /* tdfx.c -- tdfx driver -*- linux-c -*-
2  * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
3  * Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com
4  *
5  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  * 
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  * 
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25  * DEALINGS IN THE SOFTWARE.
26  * 
27  * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/tdfx_drv.c,v 1.3 2000/02/23 04:47:31 martin Exp $
28  *
29  */
30
31 #define EXPORT_SYMTAB
32 #include "drmP.h"
33 #include "tdfx_drv.h"
34 EXPORT_SYMBOL(tdfx_init);
35 EXPORT_SYMBOL(tdfx_cleanup);
36
37 #define TDFX_NAME        "tdfx"
38 #define TDFX_DESC        "tdfx"
39 #define TDFX_DATE        "19991009"
40 #define TDFX_MAJOR       0
41 #define TDFX_MINOR       0
42 #define TDFX_PATCHLEVEL  1
43
44 static drm_device_t           tdfx_device;
45 drm_ctx_t                     tdfx_res_ctx;
46
/* File operations for the tdfx misc device.  Hooks not listed here
 * (write, lseek, ...) fall back to the kernel defaults.  Uses the old
 * GNU "label:" designated-initializer syntax common in this era. */
static struct file_operations tdfx_fops = {
        open:    tdfx_open,     /* driver-specific: counts opens, runs setup */
        flush:   drm_flush,
        release: tdfx_release,  /* driver-specific: counts closes, runs takedown */
        ioctl:   tdfx_ioctl,    /* dispatches through tdfx_ioctls[] below */
        mmap:    drm_mmap,
        read:    drm_read,
        fasync:  drm_fasync,
        poll:    drm_poll,
};
57
/* Misc character device registration: a dynamically assigned minor under
 * MISC_MAJOR, dispatching through tdfx_fops.  Registered in tdfx_init(). */
static struct miscdevice      tdfx_misc = {
        minor: MISC_DYNAMIC_MINOR,
        name:  TDFX_NAME,
        fops:  &tdfx_fops,
};
63
64 static drm_ioctl_desc_t       tdfx_ioctls[] = {
65         [DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { tdfx_version,    0, 0 },
66         [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique,   0, 0 },
67         [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,    0, 0 },
68         [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid,   0, 1 },
69
70         [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique,   1, 1 },
71         [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]      = { drm_block,       1, 1 },
72         [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,     1, 1 },
73         [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic,   1, 1 },
74         [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,      1, 1 },
75         
76         [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { tdfx_addctx,     1, 1 },
77         [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { tdfx_rmctx,      1, 1 },
78         [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { tdfx_modctx,     1, 1 },
79         [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { tdfx_getctx,     1, 0 },
80         [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx,  1, 1 },
81         [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { tdfx_newctx,     1, 1 },
82         [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { tdfx_resctx,     1, 0 },
83         [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,     1, 1 },
84         [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,      1, 1 },
85         [DRM_IOCTL_NR(DRM_IOCTL_LOCK)]       = { tdfx_lock,       1, 0 },
86         [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { tdfx_unlock,     1, 0 },
87         [DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,      1, 0 },
88 #ifdef DRM_AGP
89         [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = {drm_agp_acquire, 1, 1},
90         [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = {drm_agp_release, 1, 1},
91         [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = {drm_agp_enable,  1, 1},
92         [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = {drm_agp_info,    1, 1},
93         [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = {drm_agp_alloc,   1, 1},
94         [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = {drm_agp_free,    1, 1},
95         [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = {drm_agp_unbind,  1, 1},
96         [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = {drm_agp_bind,    1, 1},
97 #endif
98 };
99 #define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
100
#ifdef MODULE
/* Module entry points plus the "tdfx" insmod option string, which is
 * handed to drm_parse_options() from tdfx_init(). */
int                           init_module(void);
void                          cleanup_module(void);
static char                   *tdfx = NULL;   /* insmod option: tdfx="..." */

MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas.");
MODULE_DESCRIPTION("tdfx");
MODULE_PARM(tdfx, "s");                       /* string parameter */

/* init_module is called when insmod is used to load the module */

int init_module(void)
{
        return tdfx_init();
}

/* cleanup_module is called when rmmod is used to unload the module */

void cleanup_module(void)
{
        tdfx_cleanup();
}
#endif
124
#ifndef MODULE
/* tdfx_setup is called by the kernel to parse command-line options passed
 * via the boot-loader (e.g., LILO).  It calls the insmod option routine,
 * drm_parse_drm.
 *
 * This is not currently supported, since it requires changes to
 * linux/init/main.c. */


/* NOTE(review): this name collides with the static tdfx_setup(drm_device_t *)
 * defined below, so a built-in (!MODULE) build would fail with a
 * redefinition error.  One of the two should be renamed before built-in
 * support is enabled -- confirm against linux/init/main.c wiring. */
void __init tdfx_setup(char *str, int *ints)
{
        if (ints[0] != 0) {
                DRM_ERROR("Illegal command line format, ignored\n");
                return;
        }
        drm_parse_options(str);
}
#endif
143
/* Reset all per-device software state: usage/statistics counters, the
 * authentication-magic hash table, map/vma/queue lists, the hardware
 * lock, timers, and the debug ring buffer.  Called from tdfx_open()
 * when the open count goes 0 -> 1; no hardware is touched here.
 * Always returns 0. */
static int tdfx_setup(drm_device_t *dev)
{
        int i;

        /* Usage and statistics counters. */
        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
        dev->buf_use      = 0;
        atomic_set(&dev->buf_alloc, 0);

        atomic_set(&dev->total_open, 0);
        atomic_set(&dev->total_close, 0);
        atomic_set(&dev->total_ioctl, 0);
        atomic_set(&dev->total_irq, 0);
        atomic_set(&dev->total_ctx, 0);
        atomic_set(&dev->total_locks, 0);
        atomic_set(&dev->total_unlocks, 0);
        atomic_set(&dev->total_contends, 0);
        atomic_set(&dev->total_sleeps, 0);

        /* Empty the authentication-magic hash buckets. */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }
        /* Map, vma, lock, queue, irq and context bookkeeping. */
        dev->maplist        = NULL;
        dev->map_count      = 0;
        dev->vmalist        = NULL;
        dev->lock.hw_lock   = NULL;
        init_waitqueue_head(&dev->lock.lock_queue);
        dev->queue_count    = 0;
        dev->queue_reserved = 0;
        dev->queue_slots    = 0;
        dev->queuelist      = NULL;
        dev->irq            = 0;
        dev->context_flag   = 0;
        dev->interrupt_flag = 0;
        dev->dma            = 0;
        dev->dma_flag       = 0;
        dev->last_context   = 0;
        dev->last_switch    = 0;
        dev->last_checked   = 0;
        init_timer(&dev->timer);
        init_waitqueue_head(&dev->context_wait);

        dev->ctx_start      = 0;
        dev->lck_start      = 0;

        /* Debug message ring buffer (read via drm_read/drm_poll). */
        dev->buf_rp       = dev->buf;
        dev->buf_wp       = dev->buf;
        dev->buf_end      = dev->buf + DRM_BSZ;
        dev->buf_async    = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);

        /* -1 marks "no reserved context allocated yet"; see tdfx_lock(). */
        tdfx_res_ctx.handle=-1;

        DRM_DEBUG("\n");

        /* The kernel's context could be created here, but is now created
           in drm_dma_enqueue.  This is more resource-efficient for
           hardware that does not do DMA, but may mean that
           drm_select_queue fails between the time the interrupt is
           initialized and the time the queues are initialized. */

        return 0;
}
209
210
/* Tear down all per-device software state built up since tdfx_setup():
 * names, magic hash, AGP allocations, vma list, maps (with MTRR and
 * ioremap/page cleanup), and the hardware lock.  Called from the last
 * tdfx_release() and from error paths in tdfx_init()/tdfx_cleanup().
 * Runs entirely under dev->struct_sem.  Always returns 0. */
static int tdfx_takedown(drm_device_t *dev)
{
        int               i;
        drm_magic_entry_t *pt, *next;
        drm_map_t         *map;
        drm_vma_entry_t   *vma, *vma_next;

        DRM_DEBUG("\n");

        down(&dev->struct_sem);
        del_timer(&dev->timer);

        if (dev->devname) {
                drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
                dev->devname = NULL;
        }

        if (dev->unique) {
                drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }
                                /* Clear pid list */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                for (pt = dev->magiclist[i].head; pt; pt = next) {
                        next = pt->next;
                        drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }
#ifdef DRM_AGP
                                /* Clear AGP information */
        if (dev->agp) {
                drm_agp_mem_t *temp;
                drm_agp_mem_t *temp_next;

                /* Free every outstanding AGP allocation, then release
                   the AGP backend if we had acquired it. */
                temp = dev->agp->memory;
                while(temp != NULL) {
                        temp_next = temp->next;
                        drm_free_agp(temp->memory, temp->pages);
                        drm_free(temp, sizeof(*temp), DRM_MEM_AGPLISTS);
                        temp = temp_next;
                }
                if(dev->agp->acquired) (*drm_agp.release)();
                drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
                dev->agp = NULL;
        }
#endif
                                /* Clear vma list (only built for debugging) */
        if (dev->vmalist) {
                for (vma = dev->vmalist; vma; vma = vma_next) {
                        vma_next = vma->next;
                        drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
                }
                dev->vmalist = NULL;
        }

                                /* Clear map area and mtrr information */
        if (dev->maplist) {
                for (i = 0; i < dev->map_count; i++) {
                        map = dev->maplist[i];
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
                                /* mtrr < 0 means no MTRR was set up for
                                   this mapping. */
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
#endif
                                drm_ioremapfree(map->handle, map->size);
                                break;
                        case _DRM_SHM:
                                drm_free_pages((unsigned long)map->handle,
                                               drm_order(map->size)
                                               - PAGE_SHIFT,
                                               DRM_MEM_SAREA);
                                break;
                        case _DRM_AGP:
                                /* Do nothing here, because this is all
                                   handled in the AGP/GART driver. */
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
                drm_free(dev->maplist,
                         dev->map_count * sizeof(*dev->maplist),
                         DRM_MEM_MAPS);
                dev->maplist   = NULL;
                dev->map_count = 0;
        }

        /* The lock lived in the (now freed) SHM SAREA; drop the pointer
           and wake anyone sleeping in tdfx_lock() so they see it gone. */
        if (dev->lock.hw_lock) {
                dev->lock.hw_lock    = NULL; /* SHM removed */
                dev->lock.pid        = 0;
                wake_up_interruptible(&dev->lock.lock_queue);
        }
        up(&dev->struct_sem);

        return 0;
}
315
316 /* tdfx_init is called via init_module at module load time, or via
317  * linux/init/main.c (this is not currently supported). */
318
/* Driver load: zero the (single, static) device, parse module options,
 * register the misc device, and set up /proc, AGP and the context
 * bitmap.  Returns 0 on success or a negative errno from
 * misc_register()/drm_ctxbitmap_init(); on bitmap failure everything
 * registered so far is unwound. */
int tdfx_init(void)
{
        int                   retcode;
        drm_device_t          *dev = &tdfx_device;

        DRM_DEBUG("\n");

        memset((void *)dev, 0, sizeof(*dev));
        dev->count_lock   = SPIN_LOCK_UNLOCKED;
        sema_init(&dev->struct_sem, 1);

#ifdef MODULE
        drm_parse_options(tdfx);   /* insmod tdfx="..." option string */
#endif

        if ((retcode = misc_register(&tdfx_misc))) {
                DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME);
                return retcode;
        }
        dev->device = MKDEV(MISC_MAJOR, tdfx_misc.minor);
        dev->name   = TDFX_NAME;

        drm_mem_init();
        drm_proc_init(dev);
#ifdef DRM_AGP
        dev->agp    = drm_agp_init();
#endif
        if((retcode = drm_ctxbitmap_init(dev))) {
                /* Unwind in reverse order of setup. */
                DRM_ERROR("Cannot allocate memory for context bitmap.\n");
                drm_proc_cleanup();
                misc_deregister(&tdfx_misc);
                tdfx_takedown(dev);
                return retcode;
        }

        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 TDFX_NAME,
                 TDFX_MAJOR,
                 TDFX_MINOR,
                 TDFX_PATCHLEVEL,
                 TDFX_DATE,
                 tdfx_misc.minor);

        return 0;
}
364
365 /* tdfx_cleanup is called via cleanup_module at module unload time. */
366
367 void tdfx_cleanup(void)
368 {
369         drm_device_t          *dev = &tdfx_device;
370
371         DRM_DEBUG("\n");
372         
373         drm_proc_cleanup();
374         if (misc_deregister(&tdfx_misc)) {
375                 DRM_ERROR("Cannot unload module\n");
376         } else {
377                 DRM_INFO("Module unloaded\n");
378         }
379         drm_ctxbitmap_cleanup(dev);
380         tdfx_takedown(dev);
381 }
382
383 int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
384                   unsigned long arg)
385 {
386         drm_version_t version;
387         int           len;
388
389         copy_from_user_ret(&version,
390                            (drm_version_t *)arg,
391                            sizeof(version),
392                            -EFAULT);
393
394 #define DRM_COPY(name,value)                                 \
395         len = strlen(value);                                 \
396         if (len > name##_len) len = name##_len;              \
397         name##_len = strlen(value);                          \
398         if (len && name) {                                   \
399                 copy_to_user_ret(name, value, len, -EFAULT); \
400         }
401
402         version.version_major      = TDFX_MAJOR;
403         version.version_minor      = TDFX_MINOR;
404         version.version_patchlevel = TDFX_PATCHLEVEL;
405
406         DRM_COPY(version.name, TDFX_NAME);
407         DRM_COPY(version.date, TDFX_DATE);
408         DRM_COPY(version.desc, TDFX_DESC);
409
410         copy_to_user_ret((drm_version_t *)arg,
411                          &version,
412                          sizeof(version),
413                          -EFAULT);
414         return 0;
415 }
416
/* Open handler: attach the caller to the device via drm_open_helper();
 * on success, bump the module and open counts.  The first opener
 * (open_count 0 -> 1) also reinitializes the device state through
 * tdfx_setup().  Returns 0 or the error from drm_open_helper()/
 * tdfx_setup(). */
int tdfx_open(struct inode *inode, struct file *filp)
{
        drm_device_t  *dev    = &tdfx_device;
        int           retcode = 0;

        DRM_DEBUG("open_count = %d\n", dev->open_count);
        if (!(retcode = drm_open_helper(inode, filp, dev))) {
                MOD_INC_USE_COUNT;
                atomic_inc(&dev->total_open);
                spin_lock(&dev->count_lock);
                if (!dev->open_count++) {
                        /* First opener: drop the spinlock before the
                           (potentially sleeping) setup work. */
                        spin_unlock(&dev->count_lock);
                        return tdfx_setup(dev);
                }
                spin_unlock(&dev->count_lock);
        }
        return retcode;
}
435
/* Release handler: detach the caller via drm_release() and drop the
 * module/open counts.  The last closer (open_count -> 0) tears down the
 * device state via tdfx_takedown(), unless ioctls are still in flight
 * or processes are blocked, in which case -EBUSY is returned. */
int tdfx_release(struct inode *inode, struct file *filp)
{
        drm_file_t    *priv   = filp->private_data;
        drm_device_t  *dev    = priv->dev;
        int           retcode = 0;

        DRM_DEBUG("open_count = %d\n", dev->open_count);
        if (!(retcode = drm_release(inode, filp))) {
                MOD_DEC_USE_COUNT;
                atomic_inc(&dev->total_close);
                spin_lock(&dev->count_lock);
                if (!--dev->open_count) {
                        /* Refuse teardown while the device is busy. */
                        if (atomic_read(&dev->ioctl_count) || dev->blocked) {
                                DRM_ERROR("Device busy: %d %d\n",
                                          atomic_read(&dev->ioctl_count),
                                          dev->blocked);
                                spin_unlock(&dev->count_lock);
                                return -EBUSY;
                        }
                        /* Drop the spinlock before the (potentially
                           sleeping) teardown. */
                        spin_unlock(&dev->count_lock);
                        return tdfx_takedown(dev);
                }
                spin_unlock(&dev->count_lock);
        }
        return retcode;
}
462
463 /* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
464
465 int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
466                 unsigned long arg)
467 {
468         int              nr      = DRM_IOCTL_NR(cmd);
469         drm_file_t       *priv   = filp->private_data;
470         drm_device_t     *dev    = priv->dev;
471         int              retcode = 0;
472         drm_ioctl_desc_t *ioctl;
473         drm_ioctl_t      *func;
474
475         atomic_inc(&dev->ioctl_count);
476         atomic_inc(&dev->total_ioctl);
477         ++priv->ioctl_count;
478         
479         DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
480                   current->pid, cmd, nr, dev->device, priv->authenticated);
481
482         if (nr >= TDFX_IOCTL_COUNT) {
483                 retcode = -EINVAL;
484         } else {
485                 ioctl     = &tdfx_ioctls[nr];
486                 func      = ioctl->func;
487
488                 if (!func) {
489                         DRM_DEBUG("no function\n");
490                         retcode = -EINVAL;
491                 } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
492                             || (ioctl->auth_needed && !priv->authenticated)) {
493                         retcode = -EACCES;
494                 } else {
495                         retcode = (func)(inode, filp, cmd, arg);
496                 }
497         }
498         
499         atomic_dec(&dev->ioctl_count);
500         return retcode;
501 }
502
/* DRM_IOCTL_LOCK handler: sleep (interruptibly) until the hardware lock
 * can be taken on behalf of lock.context, then record the owner pid and
 * tweak the caller's scheduling slice.  Returns 0 with the lock held,
 * -EINVAL for the kernel context, -EFAULT on a bad user pointer,
 * -EINTR if the device disappeared while waiting, or -ERESTARTSYS on a
 * signal.  The #if 0 sections preserve earlier context-switch and
 * anti-starvation experiments. */
int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
              unsigned long arg)
{
        drm_file_t        *priv   = filp->private_data;
        drm_device_t      *dev    = priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int               ret   = 0;
        drm_lock_t        lock;
#if DRM_DMA_HISTOGRAM
        cycles_t          start;

        dev->lck_start = start = get_cycles();
#endif

        copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

        /* Userland may never take the lock as the kernel context. */
        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock.context, current->pid, dev->lock.hw_lock->lock,
                  lock.flags);

#if 0
                                /* dev->queue_count == 0 right now for
                                   tdfx.  FIXME? */
        if (lock.context < 0 || lock.context >= dev->queue_count)
                return -EINVAL;
#endif

        if (!ret) {
#if 0
                if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
                    != lock.context) {
                        long j = jiffies - dev->lock.lock_time;

                        if (lock.context == tdfx_res_ctx.handle &&
                                j >= 0 && j < DRM_LOCK_SLICE) {
                                /* Can't take lock if we just had it and
                                   there is contention. */
                                DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
                                        lock.context, current->pid, j, 
                                        dev->lock.lock_time, jiffies);
                                current->state = TASK_INTERRUPTIBLE;
                                current->policy |= SCHED_YIELD;
                                schedule_timeout(DRM_LOCK_SLICE-j);
                                DRM_DEBUG("jiffies=%d\n", jiffies);
                        }
                }
#endif
                /* Classic sleep/wakeup loop: try the lock, otherwise
                   sleep on lock_queue until woken (by an unlock or by
                   takedown clearing hw_lock). */
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          lock.context)) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                break;  /* Got lock */
                        }

                                /* Contention */
                        atomic_inc(&dev->total_sleeps);
                        current->state = TASK_INTERRUPTIBLE;
#if 1
                        current->policy |= SCHED_YIELD;
#endif
                        schedule();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
        }

#if 0
        if (!ret && dev->last_context != lock.context &&
                lock.context != tdfx_res_ctx.handle &&
                dev->last_context != tdfx_res_ctx.handle) {
                add_wait_queue(&dev->context_wait, &entry);
                current->state = TASK_INTERRUPTIBLE;
                /* PRE: dev->last_context != lock.context */
                tdfx_context_switch(dev, dev->last_context, lock.context);
                /* POST: we will wait for the context
                   switch and will dispatch on a later call
                   when dev->last_context == lock.context
                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                   TIME! */
                current->policy |= SCHED_YIELD;
                schedule();
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->context_wait, &entry);
                if (signal_pending(current)) {
                        ret = -EINTR;
                } else if (dev->last_context != lock.context) {
                        DRM_ERROR("Context mismatch: %d %d\n",
                                dev->last_context, lock.context);
                }
        }
#endif

        if (!ret) {
                if (lock.flags & _DRM_LOCK_READY) {
                                /* Wait for space in DMA/FIFO */
                }
                if (lock.flags & _DRM_LOCK_QUIESCENT) {
                                /* Make hardware quiescent */
#if 0
                        tdfx_quiescent(dev);
#endif
                }
        }

#if 0
        DRM_ERROR("pid = %5d, old counter = %5ld\n", 
                current->pid, current->counter);
#endif
        /* Scheduling tweak for lock holders other than the reserved
           context (presumably the X server's; see tdfx_res_ctx --
           confirm against the context-creation path). */
        if (lock.context != tdfx_res_ctx.handle) {
                current->counter = 5;
                current->priority = DEF_PRIORITY/4;
        }
#if 0
        while (current->counter > 25)
                current->counter >>= 1; /* decrease time slice */
        DRM_ERROR("pid = %5d, new counter = %5ld\n",
                 current->pid, current->counter);
#endif
        DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif

        return ret;
}
647
648
/* DRM_IOCTL_UNLOCK handler: hand the hardware lock back to the kernel
 * context and free it, waking any sleeper in tdfx_lock(), then restore
 * the caller's scheduling priority.  Returns 0, -EINVAL if the caller
 * names the kernel context, or -EFAULT on a bad user pointer. */
int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t        *priv   = filp->private_data;
        drm_device_t      *dev    = priv->dev;
        drm_lock_t        lock;

        copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);

        /* Userland may never unlock as the kernel context. */
        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d frees lock (%d holds)\n",
                  lock.context,
                  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
        atomic_inc(&dev->total_unlocks);
        if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
                atomic_inc(&dev->total_contends);
        /* Transfer to the kernel context first, then free from there,
           waking waiters on lock_queue. */
        drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
                                /* FIXME: Try to send data to card here */
        if (!dev->context_flag) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }

#if 0
        current->policy |= SCHED_YIELD;
        current->state = TASK_INTERRUPTIBLE;
        schedule_timeout(1000);
#endif

        /* Undo the priority tweak applied in tdfx_lock() for contexts
           other than the reserved one. */
        if (lock.context != tdfx_res_ctx.handle) {
                current->counter = 5;
                current->priority = DEF_PRIORITY;
        }
#if 0
        current->state = TASK_INTERRUPTIBLE;
        schedule_timeout(10);
#endif

        return 0;
}