/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"

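/* Start a DMA transfer: program the engine with the buffer's physical
 * address, spin until GAMMA_GCOMMANDSTATUS reads back 4 (taken here as
 * the engine-ready indication), then write the length in 32-bit words to
 * GAMMA_DMACOUNT, which kicks off the transfer. */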
static __inline__ void gamma_dma_dispatch(drm_device_t *dev,
                                          unsigned long address,
                                          unsigned long length)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        GAMMA_WRITE(GAMMA_DMAADDRESS, DRM_VTOPHYS((void *)address));
        while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
                ;
        GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}

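/* Wait for a single-MX board to go idle: let the outstanding DMA count
 * drain, wait for input-FIFO space, then request a sync by writing
 * FilterMode and Sync, and spin until the sync tag comes out of the
 * output FIFO. */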
void gamma_dma_quiescent_single(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_DMACOUNT))
                ;
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                ;

        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        ;
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

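/* Dual-MX variant of the quiescence wait: broadcast the sync request to
 * both MX chips, then drain the sync tag from each output FIFO in turn
 * (the second MX's registers sit at a 0x10000 offset). */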
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_DMACOUNT))
                ;
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                ;

        GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);

        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

                                /* Read from first MX */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        ;
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

                                /* Read from second MX */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
                        ;
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

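/* Block until the DMA engine has consumed every outstanding word. */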
void gamma_dma_ready(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        while (GAMMA_READ(GAMMA_DMACOUNT))
                ;
}

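/* Non-blocking check: the engine is ready when its outstanding word
 * count has dropped to zero. */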
static __inline__ int gamma_dma_is_ready(drm_device_t *dev)
{
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        return !GAMMA_READ(GAMMA_DMACOUNT);
}

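/* Interrupt service routine: count the IRQ, re-arm the delay timer and
 * write the interrupt-flag registers, then, if the engine is idle, retire
 * the buffer that just completed and queue the bottom half to schedule
 * the next transfer. */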
void gamma_dma_service(DRM_IRQ_ARGS)
{
        drm_device_t        *dev      = (drm_device_t *)device;
        drm_device_dma_t    *dma      = dev->dma;
        drm_gamma_private_t *dev_priv =
                (drm_gamma_private_t *)dev->dev_private;

        atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
        GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0.05 s */
        GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
        GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
        if (gamma_dma_is_ready(dev)) {
                                /* Free previous buffer */
                if (test_and_set_bit(0, &dev->dma_flag)) return;
                if (dma->this_buffer) {
                        gamma_free_buffer(dev, dma->this_buffer);
                        dma->this_buffer = NULL;
                }
                clear_bit(0, &dev->dma_flag);

                taskqueue_enqueue(taskqueue_swi, &dev->task);
        }
}

/* Only called by gamma_dma_schedule. */
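/* Dispatch dma->next_buffer: validate it, take the hardware lock unless
 * the caller already holds it, switch contexts if the buffer belongs to
 * a different context, then start the transfer and retire the previously
 * dispatched buffer. */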
static int gamma_do_dma(drm_device_t *dev, int locked)
{
        unsigned long    address;
        unsigned long    length;
        drm_buf_t        *buf;
        int              retcode = 0;
        drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
        cycles_t         dma_start, dma_stop;
#endif

        if (test_and_set_bit(0, &dev->dma_flag)) return DRM_ERR(EBUSY);

#if DRM_DMA_HISTOGRAM
        dma_start = get_cycles();
#endif

        if (!dma->next_buffer) {
                DRM_ERROR("No next_buffer\n");
                clear_bit(0, &dev->dma_flag);
                return DRM_ERR(EINVAL);
        }

        buf     = dma->next_buffer;
        address = (unsigned long)buf->address;
        length  = buf->used;

        DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
                  buf->context, buf->idx, length);

        if (buf->list == DRM_LIST_RECLAIM) {
                gamma_clear_next_buffer(dev);
                gamma_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return DRM_ERR(EINVAL);
        }

        if (!length) {
                DRM_ERROR("0 length buffer\n");
                gamma_clear_next_buffer(dev);
                gamma_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return 0;
        }

        if (!gamma_dma_is_ready(dev)) {
                clear_bit(0, &dev->dma_flag);
                return DRM_ERR(EBUSY);
        }

        if (buf->while_locked) {
                if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                        DRM_ERROR("Dispatching buffer %d from pid %d"
                                  " \"while locked\", but no lock held\n",
                                  buf->idx, buf->pid);
                }
        } else {
                if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
                                                DRM_KERNEL_CONTEXT)) {
                        clear_bit(0, &dev->dma_flag);
                        return DRM_ERR(EBUSY);
                }
        }

        if (dev->last_context != buf->context
            && !(dev->queuelist[buf->context]->flags
                 & _DRM_CONTEXT_PRESERVED)) {
                                /* PRE: dev->last_context != buf->context */
                if (DRM(context_switch)(dev, dev->last_context,
                                        buf->context)) {
                        DRM(clear_next_buffer)(dev);
                        DRM(free_buffer)(dev, buf);
                }
                retcode = EBUSY;
                goto cleanup;

                                /* POST: we will wait for the context
                                   switch and will dispatch on a later call
                                   when dev->last_context == buf->context.
                                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                                   TIME! */
        }

        gamma_clear_next_buffer(dev);
        buf->pending     = 1;
        buf->waiting     = 0;
        buf->list        = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
        buf->time_dispatched = get_cycles();
#endif

        gamma_dma_dispatch(dev, address, length);
        gamma_free_buffer(dev, dma->this_buffer);
        dma->this_buffer = buf;

        atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
        atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

        if (!buf->while_locked && !dev->context_flag && !locked) {
                if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
                                    DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
cleanup:

        clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
        dma_stop = get_cycles();
        atomic_inc(&dev->histo.dma[gamma_histogram_slot(dma_stop - dma_start)]);
#endif

        return DRM_ERR(retcode);
}

static void gamma_dma_timer_bh(unsigned long dev)
{
        gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_dma_immediate_bh(DRM_TASKQUEUE_ARGS)
{
        gamma_dma_schedule(dev, 0);
}

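/* Scheduler bottom half: retry a previously selected buffer first,
 * otherwise pull the next buffer from the next queue that
 * gamma_select_queue picks; keep looping (bounded by 'expire') while the
 * engine is ready and buffers are being processed or interrupts are
 * being missed. */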
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
        int              next;
        drm_queue_t      *q;
        drm_buf_t        *buf;
        int              retcode   = 0;
        int              processed = 0;
        int              missed;
        int              expire    = 20;
        drm_device_dma_t *dma      = dev->dma;
#if DRM_DMA_HISTOGRAM
        cycles_t         schedule_start;
#endif

        if (test_and_set_bit(0, &dev->interrupt_flag)) {
                                /* Not reentrant */
                atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
                return DRM_ERR(EBUSY);
        }
        missed = atomic_read(&dev->counts[10]);

#if DRM_DMA_HISTOGRAM
        schedule_start = get_cycles();
#endif

again:
        if (dev->context_flag) {
                clear_bit(0, &dev->interrupt_flag);
                return DRM_ERR(EBUSY);
        }
        if (dma->next_buffer) {
                                /* Unsent buffer that was previously
                                   selected, but that couldn't be sent
                                   because the lock could not be obtained
                                   or the DMA engine wasn't ready.  Try
                                   again. */
                if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
        } else {
                do {
                        next = gamma_select_queue(dev, gamma_dma_timer_bh);
                        if (next >= 0) {
                                q   = dev->queuelist[next];
                                buf = gamma_waitlist_get(&q->waitlist);
                                dma->next_buffer = buf;
                                dma->next_queue  = q;
                                if (buf && buf->list == DRM_LIST_RECLAIM) {
                                        gamma_clear_next_buffer(dev);
                                        gamma_free_buffer(dev, buf);
                                }
                        }
                } while (next >= 0 && !dma->next_buffer);
                if (dma->next_buffer) {
                        if (!(retcode = gamma_do_dma(dev, locked))) {
                                ++processed;
                        }
                }
        }

        if (--expire) {
                if (missed != atomic_read(&dev->counts[10])) {
                        if (gamma_dma_is_ready(dev)) goto again;
                }
                if (processed && gamma_dma_is_ready(dev)) {
                        processed = 0;
                        goto again;
                }
        }

        clear_bit(0, &dev->interrupt_flag);

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.schedule[gamma_histogram_slot(get_cycles()
                                                             - schedule_start)]);
#endif
        return retcode;
}

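/* Priority DMA: bypass the scheduling queues and dispatch the caller's
 * buffers directly, with interrupt handling disabled for the duration.
 * The hardware lock is taken unless _DRM_DMA_WHILE_LOCKED was set. */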
static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
        unsigned long     address;
        unsigned long     length;
        int               must_free = 0;
        int               retcode   = 0;
        int               i;
        int               idx;
        drm_buf_t         *buf;
        drm_buf_t         *last_buf = NULL;
        drm_device_dma_t  *dma      = dev->dma;
        static int never;

                                /* Turn off interrupt handling */
        while (test_and_set_bit(0, &dev->interrupt_flag)) {
                retcode = tsleep(&never, PZERO|PCATCH, "gamp1", 1);
                if (retcode)
                        return retcode;
        }
        if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
                while (!gamma_lock_take(&dev->lock.hw_lock->lock,
                                        DRM_KERNEL_CONTEXT)) {
                        retcode = tsleep(&never, PZERO|PCATCH, "gamp2", 1);
                        if (retcode)
                                goto cleanup; /* we own interrupt_flag here;
                                                 clear it on the way out
                                                 instead of leaking it */
                }
                ++must_free;
        }

        for (i = 0; i < d->send_count; i++) {
                idx = d->send_indices[i];
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  d->send_indices[i], dma->buf_count - 1);
                        continue;
                }
                buf = dma->buflist[idx];
                if (buf->pid != DRM_CURRENTPID) {
                        DRM_ERROR("Process %d using buffer owned by %d\n",
                                  DRM_CURRENTPID, buf->pid);
                        retcode = EINVAL;
                        goto cleanup;
                }
                if (buf->list != DRM_LIST_NONE) {
                        DRM_ERROR("Process %d using %d's buffer on list %d\n",
                                  DRM_CURRENTPID, buf->pid, buf->list);
                        retcode = EINVAL;
                        goto cleanup;
                }
                                /* This isn't a race condition on
                                   buf->list, since our concern is the
                                   buffer reclaim during the time the
                                   process closes the /dev/drm? handle, so
                                   it can't also be doing DMA. */
                buf->list         = DRM_LIST_PRIO;
                buf->used         = d->send_sizes[i];
                buf->context      = d->context;
                buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
                address           = (unsigned long)buf->address;
                length            = buf->used;
                if (!length) {
                        DRM_ERROR("0 length buffer\n");
                }
                if (buf->pending) {
                        DRM_ERROR("Sending pending buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = EINVAL;
                        goto cleanup;
                }
                if (buf->waiting) {
                        DRM_ERROR("Sending waiting buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = EINVAL;
                        goto cleanup;
                }
                buf->pending = 1;

                if (dev->last_context != buf->context
                    && !(dev->queuelist[buf->context]->flags
                         & _DRM_CONTEXT_PRESERVED)) {
                        /* PRE: dev->last_context != buf->context */
                        DRM(context_switch)(dev, dev->last_context,
                                            buf->context);
                                /* POST: we will wait for the context
                                   switch and will dispatch on a later call
                                   when dev->last_context == buf->context.
                                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                                   TIME! */
                        retcode = tsleep(&dev->context_wait, PZERO|PCATCH,
                                         "gamctx", 0);
                        if (retcode)
                                goto cleanup;
                        if (dev->last_context != buf->context) {
                                DRM_ERROR("Context mismatch: %d %d\n",
                                          dev->last_context,
                                          buf->context);
                        }
                }

#if DRM_DMA_HISTOGRAM
                buf->time_queued     = get_cycles();
                buf->time_dispatched = buf->time_queued;
#endif
                gamma_dma_dispatch(dev, address, length);
                atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
                atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

                if (last_buf) {
                        gamma_free_buffer(dev, last_buf);
                }
                last_buf = buf;
        }

cleanup:
        if (last_buf) {
                gamma_dma_ready(dev);
                gamma_free_buffer(dev, last_buf);
        }

        if (must_free && !dev->context_flag) {
                if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
                                    DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
        clear_bit(0, &dev->interrupt_flag);
        return DRM_ERR(retcode);
}

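/* Queue the buffers described by 'd' through the normal scheduler and,
 * if _DRM_DMA_BLOCK is set, sleep until the last buffer in the batch has
 * been dispatched and completed. */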
static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
        drm_buf_t         *last_buf = NULL;
        int               retcode   = 0;
        drm_device_dma_t  *dma      = dev->dma;

        if (d->flags & _DRM_DMA_BLOCK) {
                last_buf = dma->buflist[d->send_indices[d->send_count-1]];
                atomic_inc(&last_buf->dma_wait);
        }

        if ((retcode = gamma_dma_enqueue(dev, d))) {
                if (d->flags & _DRM_DMA_BLOCK)
                        atomic_dec(&last_buf->dma_wait);
                return retcode;
        }

        gamma_dma_schedule(dev, 0);

        if (d->flags & _DRM_DMA_BLOCK) {
                DRM_DEBUG("%d waiting\n", DRM_CURRENTPID);
                for (;;) {
                        retcode = tsleep(&last_buf->dma_wait, PZERO|PCATCH,
                                         "gamdw", 0);
                        if (!last_buf->waiting
                            && !last_buf->pending)
                                break; /* finished */
                        if (retcode)
                                break;
                }
                atomic_dec(&last_buf->dma_wait);
                DRM_DEBUG("%d running\n", DRM_CURRENTPID);
                if (!retcode
                    || (last_buf->list == DRM_LIST_PEND && !last_buf->pending)) {
                        if (!last_buf->dma_wait) {
                                gamma_free_buffer(dev, last_buf);
                        }
                }
                if (retcode) {
                        DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
                                  d->context,
                                  last_buf->waiting,
                                  last_buf->pending,
                                  DRM_WAITCOUNT(dev, d->context),
                                  last_buf->idx,
                                  last_buf->list,
                                  last_buf->pid,
                                  DRM_CURRENTPID);
                }
        }
        return DRM_ERR(retcode);
}

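/* The DMA ioctl: sanity-check the send/request counts, send any buffers
 * the caller listed (via the priority path or the normal queues), then
 * try to grant the number of free buffers requested. */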
int gamma_dma(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t  *dma      = dev->dma;
        int               retcode   = 0;
        drm_dma_t         d;

        DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t *)data, sizeof(d));

        if (d.send_count < 0 || d.send_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
                          DRM_CURRENTPID, d.send_count, dma->buf_count);
                return DRM_ERR(EINVAL);
        }

        if (d.request_count < 0 || d.request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          DRM_CURRENTPID, d.request_count, dma->buf_count);
                return DRM_ERR(EINVAL);
        }

        if (d.send_count) {
                if (d.flags & _DRM_DMA_PRIORITY)
                        retcode = gamma_dma_priority(dev, &d);
                else
                        retcode = gamma_dma_send_buffers(dev, &d);
        }

        d.granted_count = 0;

        if (!retcode && d.request_count) {
                retcode = gamma_dma_get_buffers(dev, &d);
        }

        DRM_DEBUG("%d returning, granted = %d\n",
                  DRM_CURRENTPID, d.granted_count);
        DRM_COPY_TO_USER_IOCTL((drm_dma_t *)data, d, sizeof(d));

        return retcode;
}