/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 */
#include "drmP.h"	/* core DRM definitions (assumed include path) */
#include "gamma_drm.h"
#include "gamma_drv.h"
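
/* Program a single DMA transfer: load the buffer's physical address,
 * spin until GAMMA_GCOMMANDSTATUS reads back 4 (the value this driver
 * treats as ready to accept a transfer), then write the transfer size
 * in 32-bit words -- writing GAMMA_DMACOUNT is what actually starts
 * the transfer. */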
static __inline__ void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
					  unsigned long length)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	GAMMA_WRITE(GAMMA_DMAADDRESS, DRM_VTOPHYS((void *)address));
	while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
		;
	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}

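/* Wait for a single-MX board to go idle: drain the outstanding DMA
 * word count and the input FIFO, then enable sync-tag output via
 * FilterMode and issue a Sync command, busy-waiting until the sync
 * tag emerges from the output FIFO. */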
void gamma_dma_quiescent_single(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_DMACOUNT))
		;
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;

	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

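/* Dual-MX variant of the quiescence wait: broadcast the FilterMode/
 * Sync sequence to both rasterizers, then wait for the sync tag from
 * each MX in turn (the second MX's FIFO registers sit at a 0x10000
 * offset from the first). */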
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_DMACOUNT))
		;
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		;

	GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);

	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	/* Read from first MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

	/* Read from second MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
			;
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

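/* The DMA engine is considered ready when its outstanding word count
 * reaches zero; gamma_dma_ready() spins on that condition, while
 * gamma_dma_is_ready() polls it once without blocking. */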
void gamma_dma_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while (GAMMA_READ(GAMMA_DMACOUNT))
		;
}

static __inline__ int gamma_dma_is_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	return !GAMMA_READ(GAMMA_DMACOUNT);
}

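/* Interrupt service routine: count the IRQ, re-arm the delay timer
 * and acknowledge the command/general interrupt flags, then, if the
 * engine has gone idle, retire the buffer that just completed and
 * kick the software-interrupt taskqueue to schedule the next one. */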
void gamma_dma_service( DRM_IRQ_ARGS )
{
	drm_device_t	 *dev = (drm_device_t *)device;
	drm_device_dma_t *dma = dev->dma;
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0.05 s */
	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
	if (gamma_dma_is_ready(dev)) {
		/* Free previous buffer */
		if (test_and_set_bit(0, &dev->dma_flag)) return;
		if (dma->this_buffer) {
			gamma_free_buffer(dev, dma->this_buffer);
			dma->this_buffer = NULL;
		}
		clear_bit(0, &dev->dma_flag);

		/* Dispatch new buffer */
		taskqueue_enqueue(taskqueue_swi, &dev->task);
	}
}

/* Only called by gamma_dma_schedule. */
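/* Dispatch dma->next_buffer to the hardware: take the hardware lock
 * (unless the buffer is marked while_locked or the caller already
 * holds it), perform any needed context switch, then start the DMA
 * and retire the previously dispatched buffer.  Returns EBUSY when
 * the engine or lock is busy so the scheduler can retry later. */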
static int gamma_do_dma(drm_device_t *dev, int locked)
{
	unsigned long	 address;
	unsigned long	 length;
	drm_buf_t	 *buf;
	int		 retcode = 0;
	drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t	 dma_start, dma_stop;
#endif

	if (test_and_set_bit(0, &dev->dma_flag)) return DRM_ERR( EBUSY );

#if DRM_DMA_HISTOGRAM
	dma_start = get_cycles();
#endif

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return DRM_ERR( EINVAL );
	}

	buf	= dma->next_buffer;
	address = (unsigned long)buf->address;
	length	= buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	if (buf->list == DRM_LIST_RECLAIM) {
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return DRM_ERR( EINVAL );
	}

	if (!length) {
		DRM_ERROR("0 length buffer\n");
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!gamma_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return DRM_ERR( EBUSY );
	}

	if (buf->while_locked) {
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, buf->pid);
		}
	} else {
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
						DRM_KERNEL_CONTEXT)) {
			clear_bit(0, &dev->dma_flag);
			return DRM_ERR( EBUSY );
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
		/* PRE: dev->last_context != buf->context */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		retcode = EBUSY;
		goto cleanup;

		/* POST: we will wait for the context
		   switch and will dispatch on a later call
		   when dev->last_context == buf->context.
		   NOTE WE HOLD THE LOCK THROUGHOUT THIS
		   TIME! */
	}

	gamma_clear_next_buffer(dev);
	buf->pending	 = 1;
	buf->waiting	 = 0;
	buf->list	 = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
	buf->time_dispatched = get_cycles();
#endif

	gamma_dma_dispatch(dev, address, length);
	gamma_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("cannot free lock\n");
		}
	}
cleanup:
	clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
	dma_stop = get_cycles();
	atomic_inc(&dev->histo.dma[gamma_histogram_slot(dma_stop - dma_start)]);
#endif

	return DRM_ERR( retcode );
}

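/* Bottom-half entry points: both simply re-run the scheduler without
 * the lock held.  The timer variant matches the callback signature
 * that gamma_select_queue() expects. */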
static void gamma_dma_timer_bh(unsigned long dev)
{
	gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_dma_immediate_bh(DRM_TASKQUEUE_ARGS)
{
	gamma_dma_schedule(dev, 0);
}

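/* Main scheduling loop: retry a previously selected but unsent
 * buffer, or pull the next buffer from the highest-priority wait
 * queue, and dispatch it with gamma_do_dma().  Loops (bounded by
 * "expire") as long as the engine stays ready and either interrupts
 * were missed or progress was made; returns EBUSY if another thread
 * is already scheduling. */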
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
	int		 next;
	drm_queue_t	 *q;
	drm_buf_t	 *buf;
	int		 retcode   = 0;
	int		 processed = 0;
	int		 missed;
	int		 expire	   = 20;
	drm_device_dma_t *dma	   = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t	 schedule_start;
#endif

	if (test_and_set_bit(0, &dev->interrupt_flag)) {
		/* Not reentrant */
		atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
		return DRM_ERR( EBUSY );
	}
	missed = atomic_read(&dev->counts[10]);

#if DRM_DMA_HISTOGRAM
	schedule_start = get_cycles();
#endif

again:
	if (dev->context_flag) {
		clear_bit(0, &dev->interrupt_flag);
		return DRM_ERR( EBUSY );
	}
	if (dma->next_buffer) {
		/* Unsent buffer that was previously
		   selected, but that couldn't be sent
		   because the lock could not be obtained
		   or the DMA engine wasn't ready.  Try
		   again. */
		if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
	} else {
		do {
			next = gamma_select_queue(dev, gamma_dma_timer_bh);
			if (next >= 0) {
				q   = dev->queuelist[next];
				buf = gamma_waitlist_get(&q->waitlist);
				dma->next_buffer = buf;
				if (buf && buf->list == DRM_LIST_RECLAIM) {
					gamma_clear_next_buffer(dev);
					gamma_free_buffer(dev, buf);
				}
			}
		} while (next >= 0 && !dma->next_buffer);
		if (dma->next_buffer) {
			if (!(retcode = gamma_do_dma(dev, locked))) {
				++processed;
			}
		}
	}

	if (--expire) {
		if (missed != atomic_read(&dev->counts[10])) {
			if (gamma_dma_is_ready(dev)) goto again;
		}
		if (processed && gamma_dma_is_ready(dev)) {
			processed = 0;
			goto again;
		}
	}

	clear_bit(0, &dev->interrupt_flag);

#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.schedule[gamma_histogram_slot(get_cycles()
							     - schedule_start)]);
#endif

	return DRM_ERR( retcode );
}

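/* Priority (unqueued) send path for the DMA ioctl: with interrupt
 * handling turned off and (normally) the hardware lock held, validate
 * each buffer in the request and hand it straight to
 * gamma_dma_dispatch(), bypassing the wait queues entirely. */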
static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
	unsigned long	  address;
	unsigned long	  length;
	int		  must_free = 0;
	int		  retcode   = 0;
	int		  i;
	int		  idx;
	drm_buf_t	  *buf;
	drm_buf_t	  *last_buf = NULL;
	drm_device_dma_t  *dma	    = dev->dma;
	int		  never;	/* dummy tsleep(9) wait channel */

	/* Turn off interrupt handling */
	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		retcode = tsleep(&never, PZERO|PCATCH, "gamp1", 1);
		if (retcode)
			return retcode;
	}
	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
					DRM_KERNEL_CONTEXT)) {
			retcode = tsleep(&never, PZERO|PCATCH, "gamp2", 1);
			if (retcode) {
				clear_bit(0, &dev->interrupt_flag);
				return retcode;
			}
		}
		++must_free;
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[ idx ];
		if (buf->pid != DRM_CURRENTPID) {
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  DRM_CURRENTPID, buf->pid);
			retcode = EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using %d's buffer on list %d\n",
				  DRM_CURRENTPID, buf->pid, buf->list);
			retcode = EINVAL;
			goto cleanup;
		}
		/* This isn't a race condition on
		   buf->list, since our concern is the
		   buffer reclaim during the time the
		   process closes the /dev/drm? handle, so
		   it can't also be doing DMA. */
		buf->list	  = DRM_LIST_PRIO;
		buf->used	  = d->send_sizes[i];
		buf->context	  = d->context;
		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
		address		  = (unsigned long)buf->address;
		length		  = buf->used;
		if (!length) {
			DRM_ERROR("0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = EINVAL;
			goto cleanup;
		}
		if (buf->waiting) {
			DRM_ERROR("Sending waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = EINVAL;
			goto cleanup;
		}
		buf->pending = 1;

		if (dev->last_context != buf->context
		    && !(dev->queuelist[buf->context]->flags
			 & _DRM_CONTEXT_PRESERVED)) {
			/* PRE: dev->last_context != buf->context */
			DRM(context_switch)(dev, dev->last_context,
					    buf->context);
			/* POST: we will wait for the context
			   switch and will dispatch on a later call
			   when dev->last_context == buf->context.
			   NOTE WE HOLD THE LOCK THROUGHOUT THIS
			   TIME! */
			retcode = tsleep(&dev->context_wait, PZERO|PCATCH,
					 "gamctx", 0);
			if (retcode)
				goto cleanup;
			if (dev->last_context != buf->context) {
				DRM_ERROR("Context mismatch: %d %d\n",
					  dev->last_context,
					  buf->context);
			}
		}

#if DRM_DMA_HISTOGRAM
		buf->time_queued     = get_cycles();
		buf->time_dispatched = buf->time_queued;
#endif
		gamma_dma_dispatch(dev, address, length);
		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

		if (last_buf) {
			gamma_free_buffer(dev, last_buf);
		}
		last_buf = buf;
	}

cleanup:
	if (last_buf) {
		gamma_dma_ready(dev);
		gamma_free_buffer(dev, last_buf);
	}

	if (must_free && !dev->context_flag) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("cannot free lock\n");
		}
	}
	clear_bit(0, &dev->interrupt_flag);
	return DRM_ERR( retcode );
}

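/* Normal send path for the DMA ioctl: enqueue the buffers, kick the
 * scheduler, and, for blocking requests, sleep until the last buffer
 * of the batch has been sent. */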
static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
	drm_buf_t	  *last_buf = NULL;
	int		  retcode   = 0;
	drm_device_dma_t  *dma	    = dev->dma;

	if (d->flags & _DRM_DMA_BLOCK) {
		last_buf = dma->buflist[d->send_indices[d->send_count-1]];
		atomic_inc(&last_buf->dma_wait);
	}

	if ((retcode = gamma_dma_enqueue(dev, d))) {
		if (d->flags & _DRM_DMA_BLOCK)
			atomic_dec(&last_buf->dma_wait);
		return retcode;
	}

	gamma_dma_schedule(dev, 0);

	if (d->flags & _DRM_DMA_BLOCK) {
		DRM_DEBUG("%d waiting\n", DRM_CURRENTPID);
		for (;;) {
			retcode = tsleep(&last_buf->dma_wait, PZERO|PCATCH,
					 "gamdw", 0);
			if (!last_buf->waiting
			    && !last_buf->pending)
				break; /* finished */
			if (retcode)
				break;
		}
		atomic_dec(&last_buf->dma_wait);
		DRM_DEBUG("%d running\n", DRM_CURRENTPID);
		if (!retcode
		    || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
			if (!last_buf->dma_wait) {
				gamma_free_buffer(dev, last_buf);
			}
		}
		if (retcode) {
			DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
				  d->context,
				  last_buf->waiting,
				  last_buf->pending,
				  DRM_WAITCOUNT(dev, d->context),
				  last_buf->idx,
				  last_buf->list,
				  last_buf->pid,
				  DRM_CURRENTPID);
		}
	}
	return DRM_ERR( retcode );
}

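/* The DMA ioctl itself: copy in the request, sanity-check the send and
 * request counts, send buffers via the priority or queued path,
 * optionally reserve new buffers for the caller, and copy the results
 * back out to user space. */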
int gamma_dma( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t  *dma	   = dev->dma;
	int		  retcode  = 0;
	drm_dma_t	  d;

	DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t *) data, sizeof(d));

	if (d.send_count < 0 || d.send_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d.send_count, dma->buf_count);
		return DRM_ERR( EINVAL );
	}

	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d.request_count, dma->buf_count);
		return DRM_ERR( EINVAL );
	}

	if (d.send_count) {
		if (d.flags & _DRM_DMA_PRIORITY)
			retcode = gamma_dma_priority(dev, &d);
		else
			retcode = gamma_dma_send_buffers(dev, &d);
	}

	d.granted_count = 0;

	if (!retcode && d.request_count) {
		retcode = gamma_dma_get_buffers(dev, &d);
	}

	DRM_DEBUG("%d returning, granted = %d\n",
		  DRM_CURRENTPID, d.granted_count);
	DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d));

	return retcode;
}