1 /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keithw@valinux.com>
/* Suppress the per-file module version symbol; this file is one part of the DRM module. */
33 #define __NO_VERSION__
37 #include <linux/interrupt.h> /* For task queue support */
39 /* in case we don't have a 2.3.99-pre6 kernel or later: */
/* Ownership states stored in each buffer's in_use word (flipped atomically via cmpxchg). */
44 #define I810_BUF_FREE 2
45 #define I810_BUF_CLIENT 1
46 #define I810_BUF_HARDWARE 0
/* Whether a buffer is currently mapped into a client's address space. */
48 #define I810_BUF_UNMAPPED 0
49 #define I810_BUF_MAPPED 1
/* Local variables required by the BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING macros below. */
51 #define RING_LOCALS unsigned int outring, ringmask; volatile char *virt;
/* Reserve n dwords on the low-priority ring: wait (i810_wait_ring) if fewer
 * than n*4 bytes are free, then cache tail/mask/base into RING_LOCALS for
 * the following OUT_RING calls.
 * NOTE(review): this dump is line-sampled; some continuation lines of the
 * macro (e.g. its closing "} while (0)") are elided here.
 */
53 #define BEGIN_LP_RING(n) do { \
55 DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
57 if (dev_priv->ring.space < n*4) \
58 i810_wait_ring(dev, n*4); \
59 dev_priv->ring.space -= n*4; \
60 outring = dev_priv->ring.tail; \
61 ringmask = dev_priv->ring.tail_mask; \
62 virt = dev_priv->ring.virtual_start; \
/* Publish the locally advanced tail back to dev_priv and to the LP_RING
 * tail register so the hardware starts consuming the newly written dwords. */
65 #define ADVANCE_LP_RING() do { \
66 if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
67 dev_priv->ring.tail = outring; \
68 I810_WRITE(LP_RING + RING_TAIL, outring); \
/* Emit one dword at the cached tail position, then advance and wrap the
 * local tail using ringmask (ring size is a power of two). */
71 #define OUT_RING(n) do { \
72 if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
73 *(volatile unsigned int *)(virt + outring) = n; \
75 outring &= ringmask; \
/* Debug helper: dump the hardware status page (interrupt status, ring head
 * pointers, driver counter) and the per-buffer freelist words that live at
 * offset 6 onwards in the same page (see i810_freelist_init). */
78 static inline void i810_print_status_page(drm_device_t *dev)
80 drm_device_dma_t *dma = dev->dma;
81 drm_i810_private_t *dev_priv = dev->dev_private;
82 u32 *temp = (u32 *)dev_priv->hw_status_page;
85 DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]);
86 DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
87 DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
88 DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
89 DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
/* Buffer in_use words start at dword 6 of the status page. */
90 for(i = 6; i < dma->buf_count + 6; i++) {
91 DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
/* Claim a free DMA buffer for a client.  Scans all buffers and atomically
 * flips the first FREE one to CLIENT with cmpxchg, so no lock is needed.
 * Returns the claimed buffer; the NULL-on-exhaustion return path is on an
 * elided line of this dump. */
95 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
97 drm_device_dma_t *dma = dev->dma;
101 /* Linear search might not be the best solution */
103 for (i = 0; i < dma->buf_count; i++) {
104 drm_buf_t *buf = dma->buflist[ i ];
105 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
106 /* In use is already a pointer */
107 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
/* cmpxchg returns the old value: FREE here means we won the buffer. */
109 if(used == I810_BUF_FREE) {
116 /* This should only be called if the buffer is not sent to the hardware
117 * yet, the hardware updates in use for us once its on the ring buffer.
/* Atomically return a CLIENT-owned buffer to the freelist.  Logs an error
 * (and, per the elided return, fails) if the buffer was not CLIENT-owned. */
120 static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
122 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
125 /* In use is already a pointer */
126 used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
127 if(used != I810_BUF_CLIENT) {
128 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
/* file_operations temporarily installed on the DRM file by i810_map_buffer()
 * so that its do_mmap() call is routed to i810_mmap_buffers() below. */
135 static struct file_operations i810_buffer_fops = {
138 release: DRM(release),
140 mmap: i810_mmap_buffers,
/* mmap handler used (via i810_buffer_fops) to map the buffer currently
 * published in dev_priv->mmap_buffer into the client's address space.
 * Marks the VMA VM_IO|VM_DONTCOPY and remaps the physical pages; returns
 * -EAGAIN if remap_page_range() fails. */
146 int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
148 drm_file_t *priv = filp->private_data;
150 drm_i810_private_t *dev_priv;
152 drm_i810_buf_priv_t *buf_priv;
156 dev_priv = dev->dev_private;
/* The buffer to map was stashed here by i810_map_buffer() just before do_mmap(). */
157 buf = dev_priv->mmap_buffer;
158 buf_priv = buf->dev_private;
160 vma->vm_flags |= (VM_IO | VM_DONTCOPY);
163 buf_priv->currently_mapped = I810_BUF_MAPPED;
166 if (remap_page_range(vma->vm_start,
168 vma->vm_end - vma->vm_start,
169 vma->vm_page_prot)) return -EAGAIN;
/* Map a DMA buffer into the calling process's address space.
 *
 * Under mmap_sem, temporarily swaps filp->f_op to i810_buffer_fops and
 * publishes the buffer in dev_priv->mmap_buffer so that do_mmap() lands in
 * i810_mmap_buffers().  Returns -EINVAL if the buffer is already mapped,
 * or the negative errno encoded in do_mmap()'s return on failure.
 *
 * FIX(review): the mmap_sem down()/up() calls had been corrupted by an
 * HTML-entity mangling of "&current" (rendered "¤t"); restored to
 * &current->mm->mmap_sem.
 */
173 static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
175 drm_file_t *priv = filp->private_data;
176 drm_device_t *dev = priv->dev;
177 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
178 drm_i810_private_t *dev_priv = dev->dev_private;
179 struct file_operations *old_fops;
182 if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL;
184 if(VM_DONTCOPY != 0) {
185 down(&current->mm->mmap_sem);
186 old_fops = filp->f_op;
187 filp->f_op = &i810_buffer_fops;
188 dev_priv->mmap_buffer = buf;
189 buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
190 PROT_READ|PROT_WRITE,
193 dev_priv->mmap_buffer = NULL;
194 filp->f_op = old_fops;
/* do_mmap() encodes errors as addresses in the top 1024 bytes of the range. */
195 if ((unsigned long)buf_priv->virtual > -1024UL) {
197 DRM_DEBUG("mmap error\n");
198 retcode = (signed int)buf_priv->virtual;
199 buf_priv->virtual = 0;
201 up(&current->mm->mmap_sem);
/* No VM_DONTCOPY support: fall back to the kernel-side mapping. */
203 buf_priv->virtual = buf_priv->kernel_virtual;
204 buf_priv->currently_mapped = I810_BUF_MAPPED;
/* Undo i810_map_buffer(): munmap the client-side mapping (if any) under
 * mmap_sem and mark the buffer UNMAPPED.
 *
 * FIX(review): the mmap_sem down()/up() calls had been corrupted by an
 * HTML-entity mangling of "&current" (rendered "¤t"); restored to
 * &current->mm->mmap_sem.
 */
209 static int i810_unmap_buffer(drm_buf_t *buf)
211 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
214 if(VM_DONTCOPY != 0) {
215 if(buf_priv->currently_mapped != I810_BUF_MAPPED)
217 down(&current->mm->mmap_sem);
218 #if LINUX_VERSION_CODE < 0x020399
/* Older kernels' do_munmap() took no mm argument. */
219 retcode = do_munmap((unsigned long)buf_priv->virtual,
220 (size_t) buf->total);
222 retcode = do_munmap(current->mm,
223 (unsigned long)buf_priv->virtual,
224 (size_t) buf->total);
226 up(&current->mm->mmap_sem);
228 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
229 buf_priv->virtual = 0;
/* Service a client's buffer request: grab a free buffer, map it into the
 * caller's address space, record ownership (pid), and fill in the reply
 * struct d (idx/size/virtual address).  On mapping failure the buffer is
 * returned to the freelist. */
234 static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
237 drm_file_t *priv = filp->private_data;
239 drm_i810_buf_priv_t *buf_priv;
242 buf = i810_freelist_get(dev);
245 DRM_DEBUG("retcode=%d\n", retcode);
249 retcode = i810_map_buffer(buf, filp);
251 i810_freelist_put(dev, buf);
252 DRM_DEBUG("mapbuf failed, retcode %d\n", retcode);
255 buf->pid = priv->pid;
256 buf_priv = buf->dev_private;
258 d->request_idx = buf->idx;
259 d->request_size = buf->total;
260 d->virtual = buf_priv->virtual;
/* Allocate one kernel page for the hardware status page: takes an extra
 * reference and sets PG_locked so the VM will not reclaim or touch it while
 * the hardware owns it.  Returns the page's kernel virtual address. */
265 static unsigned long i810_alloc_page(drm_device_t *dev)
267 unsigned long address;
269 address = __get_free_page(GFP_KERNEL);
273 atomic_inc(&virt_to_page(address)->count);
274 set_bit(PG_locked, &virt_to_page(address)->flags);
/* Release a page obtained from i810_alloc_page(): drop the extra reference,
 * clear PG_locked and wake any waiters on the page. */
279 static void i810_free_page(drm_device_t *dev, unsigned long page)
284 atomic_dec(&virt_to_page(page)->count);
285 clear_bit(PG_locked, &virt_to_page(page)->flags);
286 wake_up(&virt_to_page(page)->wait);
/* Tear down everything i810_dma_initialize() set up: unmap the ring,
 * release the hardware status page (pointing the chip back at a benign
 * address first), free the private struct, and unmap each buffer's
 * kernel-side AGP mapping. */
291 static int i810_dma_cleanup(drm_device_t *dev)
293 drm_device_dma_t *dma = dev->dma;
295 if(dev->dev_private) {
297 drm_i810_private_t *dev_priv =
298 (drm_i810_private_t *) dev->dev_private;
300 if(dev_priv->ring.virtual_start) {
301 DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
302 dev_priv->ring.Size);
304 if(dev_priv->hw_status_page != 0UL) {
305 i810_free_page(dev, dev_priv->hw_status_page);
306 /* Need to rewrite hardware status page */
307 I810_WRITE(0x02080, 0x1ffff000);
309 DRM(free)(dev->dev_private, sizeof(drm_i810_private_t),
311 dev->dev_private = NULL;
313 for (i = 0; i < dma->buf_count; i++) {
314 drm_buf_t *buf = dma->buflist[ i ];
315 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
316 DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
/* Busy-wait until at least n bytes are free on the LP ring.  Re-reads the
 * hardware head each pass; any head movement resets the 3-second lockup
 * timeout.  Logs and bails (elided line) if the ring makes no progress. */
322 static int i810_wait_ring(drm_device_t *dev, int n)
324 drm_i810_private_t *dev_priv = dev->dev_private;
325 drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
328 unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
330 end = jiffies + (HZ*3);
331 while (ring->space < n) {
334 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
/* Keep 8 bytes of slack between tail and head; wrap negative space. */
335 ring->space = ring->head - (ring->tail+8);
336 if (ring->space < 0) ring->space += ring->Size;
338 if (ring->head != last_head)
339 end = jiffies + (HZ*3);
342 if((signed)(end - jiffies) <= 0) {
343 DRM_ERROR("space: %d wanted %d\n", ring->space, n);
344 DRM_ERROR("lockup\n");
/* Short delay loop between polls to back off the register reads. */
348 for (i = 0 ; i < 2000 ; i++) ;
/* Resynchronize the software ring bookkeeping (head/tail/space) with the
 * hardware registers; called before the kernel emits ring commands, since
 * clients may have advanced the ring behind our back. */
355 static void i810_kernel_lost_context(drm_device_t *dev)
357 drm_i810_private_t *dev_priv = dev->dev_private;
358 drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
360 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
361 ring->tail = I810_READ(LP_RING + RING_TAIL);
362 ring->space = ring->head - (ring->tail+8);
363 if (ring->space < 0) ring->space += ring->Size;
/* Initialize the freelist: point each buffer's in_use word at successive
 * dwords of the hardware status page (starting at my_idx), mark every
 * buffer FREE, and ioremap its AGP backing so the kernel can write it.
 * Fails if there are more buffers than status-page dwords can track. */
366 static int i810_freelist_init(drm_device_t *dev)
368 drm_device_dma_t *dma = dev->dma;
369 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
371 u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
374 if(dma->buf_count > 1019) {
375 /* Not enough space in the status page for the freelist */
379 for (i = 0; i < dma->buf_count; i++) {
380 drm_buf_t *buf = dma->buflist[ i ];
381 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
383 buf_priv->in_use = hw_status++;
384 buf_priv->my_use_idx = my_idx;
387 *buf_priv->in_use = I810_BUF_FREE;
389 buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
/* One-time DMA setup from userspace-supplied init parameters: validates the
 * map indices, locates the SAREA private region, maps the ring buffer,
 * records screen/buffer layout, allocates and programs the hardware status
 * page, and initializes the buffer freelist.  Any failure unwinds through
 * i810_dma_cleanup(). */
395 static int i810_dma_initialize(drm_device_t *dev,
396 drm_i810_private_t *dev_priv,
397 drm_i810_init_t *init)
399 drm_map_t *sarea_map;
401 dev->dev_private = (void *) dev_priv;
402 memset(dev_priv, 0, sizeof(drm_i810_private_t));
/* init comes from an ioctl: map indices must be range-checked before use. */
404 if (init->ring_map_idx >= dev->map_count ||
405 init->buffer_map_idx >= dev->map_count) {
406 i810_dma_cleanup(dev);
407 DRM_ERROR("ring_map or buffer_map are invalid\n");
411 dev_priv->ring_map_idx = init->ring_map_idx;
412 dev_priv->buffer_map_idx = init->buffer_map_idx;
413 sarea_map = dev->maplist[0];
414 dev_priv->sarea_priv = (drm_i810_sarea_t *)
415 ((u8 *)sarea_map->handle +
416 init->sarea_priv_offset);
418 atomic_set(&dev_priv->flush_done, 0);
419 init_waitqueue_head(&dev_priv->flush_queue);
421 dev_priv->ring.Start = init->ring_start;
422 dev_priv->ring.End = init->ring_end;
423 dev_priv->ring.Size = init->ring_size;
425 dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
/* Ring size is a power of two; tail_mask wraps OUT_RING's offset. */
429 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
431 if (dev_priv->ring.virtual_start == NULL) {
432 i810_dma_cleanup(dev);
433 DRM_ERROR("can not ioremap virtual address for"
438 dev_priv->w = init->w;
439 dev_priv->h = init->h;
440 dev_priv->pitch = init->pitch;
441 dev_priv->back_offset = init->back_offset;
442 dev_priv->depth_offset = init->depth_offset;
/* Precomputed DESTBUFFER_INFO words: buffer offset OR'd with pitch bits. */
444 dev_priv->front_di1 = init->front_offset | init->pitch_bits;
445 dev_priv->back_di1 = init->back_offset | init->pitch_bits;
446 dev_priv->zi1 = init->depth_offset | init->pitch_bits;
449 /* Program Hardware Status Page */
450 dev_priv->hw_status_page = i810_alloc_page(dev);
451 memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
452 if(dev_priv->hw_status_page == 0UL) {
453 i810_dma_cleanup(dev);
454 DRM_ERROR("Can not allocate hardware status page\n");
457 DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page);
/* 0x02080 is the chip's hardware status page address register. */
459 I810_WRITE(0x02080, virt_to_bus((void *)dev_priv->hw_status_page));
460 DRM_DEBUG("Enabled hardware status page\n");
462 /* Now we need to init our freelist */
463 if(i810_freelist_init(dev) != 0) {
464 i810_dma_cleanup(dev);
465 DRM_ERROR("Not enough space in the status page for"
/* DMA init/cleanup ioctl entry point: copies the request from userspace and
 * dispatches (switch partially elided in this dump) to i810_dma_initialize()
 * or i810_dma_cleanup(). */
472 int i810_dma_init(struct inode *inode, struct file *filp,
473 unsigned int cmd, unsigned long arg)
475 drm_file_t *priv = filp->private_data;
476 drm_device_t *dev = priv->dev;
477 drm_i810_private_t *dev_priv;
478 drm_i810_init_t init;
481 if (copy_from_user(&init, (drm_i810_init_t *)arg, sizeof(init)))
486 dev_priv = DRM(alloc)(sizeof(drm_i810_private_t),
488 if(dev_priv == NULL) return -ENOMEM;
489 retcode = i810_dma_initialize(dev, dev_priv, &init);
491 case I810_CLEANUP_DMA:
492 retcode = i810_dma_cleanup(dev);
504 /* Most efficient way to verify state for the i810 is as it is
505 * emitted. Non-conformant state is silently dropped.
507 * Use 'volatile' & local var tmp to force the emitted values to be
508 * identical to the verified ones.
/* Emit client-supplied context registers onto the ring, validating each
 * command word against the allowed GFX opcode range before emitting. */
510 static void i810EmitContextVerified( drm_device_t *dev,
511 volatile unsigned int *code )
513 drm_i810_private_t *dev_priv = dev->dev_private;
518 BEGIN_LP_RING( I810_CTX_SETUP_SIZE );
520 OUT_RING( GFX_OP_COLOR_FACTOR );
521 OUT_RING( code[I810_CTXREG_CF1] );
523 OUT_RING( GFX_OP_STIPPLE );
524 OUT_RING( code[I810_CTXREG_ST1] );
526 for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) {
/* Accept only GFX-client (3<<29) opcodes below 0x1d -- anything else is dropped. */
529 if ((tmp & (7<<29)) == (3<<29) &&
530 (tmp & (0x1f<<24)) < (0x1d<<24))
/* Emit client-supplied texture-unit registers onto the ring, applying the
 * same opcode-range validation as i810EmitContextVerified(). */
543 static void i810EmitTexVerified( drm_device_t *dev,
544 volatile unsigned int *code )
546 drm_i810_private_t *dev_priv = dev->dev_private;
551 BEGIN_LP_RING( I810_TEX_SETUP_SIZE );
553 OUT_RING( GFX_OP_MAP_INFO );
554 OUT_RING( code[I810_TEXREG_MI1] );
555 OUT_RING( code[I810_TEXREG_MI2] );
556 OUT_RING( code[I810_TEXREG_MI3] );
558 for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) {
/* Accept only GFX-client (3<<29) opcodes below 0x1d -- anything else is dropped. */
561 if ((tmp & (7<<29)) == (3<<29) &&
562 (tmp & (0x1f<<24)) < (0x1d<<24))
576 /* Need to do some additional checking when setting the dest buffer.
/* Emit destination-buffer state.  The DI1 word is only accepted if it
 * matches the precomputed front or back buffer value from init time,
 * preventing a client from aiming rendering at arbitrary memory. */
578 static void i810EmitDestVerified( drm_device_t *dev,
579 volatile unsigned int *code )
581 drm_i810_private_t *dev_priv = dev->dev_private;
585 BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );
587 tmp = code[I810_DESTREG_DI1];
588 if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
589 OUT_RING( CMD_OP_DESTBUFFER_INFO );
592 DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
593 tmp, dev_priv->front_di1, dev_priv->back_di1);
/* Depth buffer info always comes from the trusted init-time value. */
597 OUT_RING( CMD_OP_Z_BUFFER_INFO );
598 OUT_RING( dev_priv->zi1 );
600 OUT_RING( GFX_OP_DESTBUFFER_VARS );
601 OUT_RING( code[I810_DESTREG_DV1] );
603 OUT_RING( GFX_OP_DRAWRECT_INFO );
604 OUT_RING( code[I810_DESTREG_DR1] );
605 OUT_RING( code[I810_DESTREG_DR2] );
606 OUT_RING( code[I810_DESTREG_DR3] );
607 OUT_RING( code[I810_DESTREG_DR4] );
/* Flush all dirty state categories from the SAREA to the ring via the
 * verified emitters, clearing each dirty bit as it is uploaded. */
615 static void i810EmitState( drm_device_t *dev )
617 drm_i810_private_t *dev_priv = dev->dev_private;
618 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
619 unsigned int dirty = sarea_priv->dirty;
621 if (dirty & I810_UPLOAD_BUFFERS) {
622 i810EmitDestVerified( dev, sarea_priv->BufferState );
623 sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
626 if (dirty & I810_UPLOAD_CTX) {
627 i810EmitContextVerified( dev, sarea_priv->ContextState );
628 sarea_priv->dirty &= ~I810_UPLOAD_CTX;
631 if (dirty & I810_UPLOAD_TEX0) {
632 i810EmitTexVerified( dev, sarea_priv->TexState[0] );
633 sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
636 if (dirty & I810_UPLOAD_TEX1) {
637 i810EmitTexVerified( dev, sarea_priv->TexState[1] );
638 sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
/* Clear the requested buffers (front/back/depth per flags) over every
 * SAREA cliprect, emitting one solid-pattern color blit per rect per
 * buffer.  Rects failing the bounds sanity check are skipped.
 * NOTE(review): cpp (bytes per pixel) is declared on an elided line of
 * this dump. */
646 static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
647 unsigned int clear_color,
648 unsigned int clear_zval )
650 drm_i810_private_t *dev_priv = dev->dev_private;
651 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
652 int nbox = sarea_priv->nbox;
653 drm_clip_rect_t *pbox = sarea_priv->boxes;
654 int pitch = dev_priv->pitch;
659 i810_kernel_lost_context(dev);
661 if (nbox > I810_NR_SAREA_CLIPRECTS)
662 nbox = I810_NR_SAREA_CLIPRECTS;
664 for (i = 0 ; i < nbox ; i++, pbox++) {
665 unsigned int x = pbox->x1;
666 unsigned int y = pbox->y1;
667 unsigned int width = (pbox->x2 - x) * cpp;
668 unsigned int height = pbox->y2 - y;
669 unsigned int start = y * pitch + x * cpp;
/* Reject inverted or out-of-screen rects -- cliprects come from the shared SAREA. */
671 if (pbox->x1 > pbox->x2 ||
672 pbox->y1 > pbox->y2 ||
673 pbox->x2 > dev_priv->w ||
674 pbox->y2 > dev_priv->h)
677 if ( flags & I810_FRONT ) {
678 DRM_DEBUG("clear front\n");
/* 0xF0 is the blit raster op (PATCOPY) in BR13 bits 16-23. */
680 OUT_RING( BR00_BITBLT_CLIENT |
681 BR00_OP_COLOR_BLT | 0x3 );
682 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
683 OUT_RING( (height << 16) | width );
685 OUT_RING( clear_color );
690 if ( flags & I810_BACK ) {
691 DRM_DEBUG("clear back\n");
693 OUT_RING( BR00_BITBLT_CLIENT |
694 BR00_OP_COLOR_BLT | 0x3 );
695 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
696 OUT_RING( (height << 16) | width );
697 OUT_RING( dev_priv->back_offset + start );
698 OUT_RING( clear_color );
703 if ( flags & I810_DEPTH ) {
704 DRM_DEBUG("clear depth\n");
706 OUT_RING( BR00_BITBLT_CLIENT |
707 BR00_OP_COLOR_BLT | 0x3 );
708 OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
709 OUT_RING( (height << 16) | width );
710 OUT_RING( dev_priv->depth_offset + start );
711 OUT_RING( clear_zval );
/* Copy the back buffer to the front buffer (swap) for every SAREA
 * cliprect, as one source-copy blit per rect.  Rects failing the bounds
 * sanity check are skipped.
 *
 * FIX(review): the debug print used pbox[i], but pbox is already advanced
 * together with i each iteration, so it read the wrong rect (and past the
 * boxes array for larger i).  Changed to pbox->, matching the rect
 * actually being blitted. */
718 static void i810_dma_dispatch_swap( drm_device_t *dev )
720 drm_i810_private_t *dev_priv = dev->dev_private;
721 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
722 int nbox = sarea_priv->nbox;
723 drm_clip_rect_t *pbox = sarea_priv->boxes;
724 int pitch = dev_priv->pitch;
726 int ofs = dev_priv->back_offset;
730 DRM_DEBUG("swapbuffers\n");
732 i810_kernel_lost_context(dev);
734 if (nbox > I810_NR_SAREA_CLIPRECTS)
735 nbox = I810_NR_SAREA_CLIPRECTS;
737 for (i = 0 ; i < nbox; i++, pbox++)
739 unsigned int w = pbox->x2 - pbox->x1;
740 unsigned int h = pbox->y2 - pbox->y1;
741 unsigned int dst = pbox->x1*cpp + pbox->y1*pitch;
742 unsigned int start = ofs + dst;
/* Reject inverted or out-of-screen rects -- cliprects come from the shared SAREA. */
744 if (pbox->x1 > pbox->x2 ||
745 pbox->y1 > pbox->y2 ||
746 pbox->x2 > dev_priv->w ||
747 pbox->y2 > dev_priv->h)
750 DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
751 pbox->x1, pbox->y1,
752 pbox->x2, pbox->y2);
/* 0xCC is the SRCCOPY raster op in BR13 bits 16-23. */
755 OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 );
756 OUT_RING( pitch | (0xCC << 16));
757 OUT_RING( (h << 16) | (w * cpp));
/* Dispatch a client vertex buffer: take the buffer back from the client
 * (cmpxchg CLIENT->HARDWARE), emit any dirty state, patch the primitive
 * header and terminator into the mapped buffer, unmap it, then emit one
 * scissored batch-buffer call per cliprect.  The trailing ring commands
 * store the dispatch counter and the buffer's FREE marker into the status
 * page so the hardware itself returns the buffer to the freelist.
 *
 * FIX(review): the terminator store cast the buffer pointer through (u32),
 * which truncates on 64-bit; use (unsigned long) for pointer arithmetic. */
766 static void i810_dma_dispatch_vertex(drm_device_t *dev,
771 drm_i810_private_t *dev_priv = dev->dev_private;
772 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
773 drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
774 drm_clip_rect_t *box = sarea_priv->boxes;
775 int nbox = sarea_priv->nbox;
776 unsigned long address = (unsigned long)buf->bus_address;
777 unsigned long start = address - dev->agp->base;
781 i810_kernel_lost_context(dev);
783 if (nbox > I810_NR_SAREA_CLIPRECTS)
784 nbox = I810_NR_SAREA_CLIPRECTS;
/* Hand the buffer from the client to the hardware atomically. */
787 u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
789 if(u != I810_BUF_CLIENT) {
790 DRM_DEBUG("xxxx 2\n");
797 if (sarea_priv->dirty)
798 i810EmitState( dev );
800 DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
801 address, used, nbox);
804 DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter);
805 DRM_DEBUG( "i810_dma_dispatch\n");
806 DRM_DEBUG( "start : %lx\n", start);
807 DRM_DEBUG( "used : %d\n", used);
808 DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4);
810 if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
/* Overwrite the first dword with the primitive command ... */
811 *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE |
812 sarea_priv->vertex_prim |
/* ... and terminate the batch with a zero dword after the used bytes. */
816 *(u32 *)((unsigned long)buf_priv->virtual + used) = 0;
820 i810_unmap_buffer(buf);
827 OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
829 OUT_RING( GFX_OP_SCISSOR_INFO );
830 OUT_RING( box[i].x1 | (box[i].y1<<16) );
831 OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
836 OUT_RING( CMD_OP_BATCH_BUFFER );
837 OUT_RING( start | BB1_PROTECTED );
838 OUT_RING( start + used - 4 );
842 } while (++i < nbox);
846 OUT_RING( CMD_STORE_DWORD_IDX );
848 OUT_RING( dev_priv->counter );
/* Have the hardware mark the buffer FREE in the status page when done. */
852 OUT_RING( CMD_STORE_DWORD_IDX );
853 OUT_RING( buf_priv->my_use_idx );
854 OUT_RING( I810_BUF_FREE );
858 OUT_RING( CMD_REPORT_HEAD );
864 /* Interrupts are only for flushing */
/* IRQ handler: acknowledge (clear) the chip's interrupt sources, bump the
 * IRQ statistic, and defer the wakeup work to the immediate task queue
 * (bottom half), since waking waiters is not IRQ-context work here. */
865 void i810_dma_service(int irq, void *device, struct pt_regs *regs)
867 drm_device_t *dev = (drm_device_t *)device;
870 atomic_inc(&dev->counts[_DRM_STAT_IRQ]);
871 temp = I810_READ16(I810REG_INT_IDENTITY_R);
872 temp = temp & ~(0x6000);
873 if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R,
874 temp); /* Clear all interrupts */
878 queue_task(&dev->tq, &tq_immediate);
879 mark_bh(IMMEDIATE_BH);
/* Bottom half scheduled by i810_dma_service(): mark the pending flush done
 * and wake anyone sleeping in i810_flush_queue()/i810_dma_quiescent(). */
882 void i810_dma_immediate_bh(void *device)
884 drm_device_t *dev = (drm_device_t *) device;
885 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
887 atomic_set(&dev_priv->flush_done, 1);
888 wake_up_interruptible(&dev_priv->flush_queue);
/* Emit a flush marker: report-head plus a user interrupt, whose IRQ path
 * (i810_dma_service -> i810_dma_immediate_bh) signals flush_done. */
891 static inline void i810_dma_emit_flush(drm_device_t *dev)
893 drm_i810_private_t *dev_priv = dev->dev_private;
896 i810_kernel_lost_context(dev);
899 OUT_RING( CMD_REPORT_HEAD );
900 OUT_RING( GFX_OP_USER_INTERRUPT );
/* Synchronous variant kept for reference; the IRQ path does this instead. */
903 /* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
904 /* atomic_set(&dev_priv->flush_done, 1); */
905 /* wake_up_interruptible(&dev_priv->flush_queue); */
/* Like i810_dma_emit_flush() but also flushes the instruction parser and
 * map cache first, used when full quiescence (not just a drain) is needed. */
908 static inline void i810_dma_quiescent_emit(drm_device_t *dev)
910 drm_i810_private_t *dev_priv = dev->dev_private;
913 i810_kernel_lost_context(dev);
916 OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
917 OUT_RING( CMD_REPORT_HEAD );
919 OUT_RING( GFX_OP_USER_INTERRUPT );
/* Synchronous variant kept for reference; the IRQ path does this instead. */
922 /* i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
923 /* atomic_set(&dev_priv->flush_done, 1); */
924 /* wake_up_interruptible(&dev_priv->flush_queue); */
/* Block until the hardware is quiescent: repeatedly emit a quiescence
 * marker and sleep interruptibly on flush_queue until the IRQ bottom half
 * sets flush_done, with a 3-second lockup timeout and signal handling. */
927 void i810_dma_quiescent(drm_device_t *dev)
929 DECLARE_WAITQUEUE(entry, current);
930 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
933 if(dev_priv == NULL) {
936 atomic_set(&dev_priv->flush_done, 0);
937 add_wait_queue(&dev_priv->flush_queue, &entry);
938 end = jiffies + (HZ*3);
941 current->state = TASK_INTERRUPTIBLE;
942 i810_dma_quiescent_emit(dev);
/* flush_done is set by i810_dma_immediate_bh() when the interrupt lands. */
943 if (atomic_read(&dev_priv->flush_done) == 1) break;
944 if((signed)(end - jiffies) <= 0) {
945 DRM_ERROR("lockup\n");
948 schedule_timeout(HZ*3);
949 if (signal_pending(current)) {
954 current->state = TASK_RUNNING;
955 remove_wait_queue(&dev_priv->flush_queue, &entry);
/* Drain outstanding DMA: emit a flush marker and sleep until the IRQ
 * bottom half reports completion (3s lockup timeout, -EINTR on signal),
 * then sweep all buffers, reclaiming any still marked HARDWARE back to
 * FREE via cmpxchg. */
960 static int i810_flush_queue(drm_device_t *dev)
962 DECLARE_WAITQUEUE(entry, current);
963 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
964 drm_device_dma_t *dma = dev->dma;
968 if(dev_priv == NULL) {
971 atomic_set(&dev_priv->flush_done, 0);
972 add_wait_queue(&dev_priv->flush_queue, &entry);
973 end = jiffies + (HZ*3);
975 current->state = TASK_INTERRUPTIBLE;
976 i810_dma_emit_flush(dev);
977 if (atomic_read(&dev_priv->flush_done) == 1) break;
978 if((signed)(end - jiffies) <= 0) {
979 DRM_ERROR("lockup\n");
982 schedule_timeout(HZ*3);
983 if (signal_pending(current)) {
984 ret = -EINTR; /* Can't restart */
989 current->state = TASK_RUNNING;
990 remove_wait_queue(&dev_priv->flush_queue, &entry);
/* After the flush, anything still owned by HARDWARE can safely be freed. */
993 for (i = 0; i < dma->buf_count; i++) {
994 drm_buf_t *buf = dma->buflist[ i ];
995 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
997 int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
1000 if (used == I810_BUF_HARDWARE)
1001 DRM_DEBUG("reclaimed from HARDWARE\n");
1002 if (used == I810_BUF_CLIENT)
1003 DRM_DEBUG("still on client HARDWARE\n");
1009 /* Must be called with the lock held */
/* Reclaim all buffers owned by a dying/closing process: flush outstanding
 * DMA first, then flip every CLIENT buffer owned by pid back to FREE and
 * clear its mapped state. */
1010 void i810_reclaim_buffers(drm_device_t *dev, pid_t pid)
1012 drm_device_dma_t *dma = dev->dma;
1016 if (!dev->dev_private) return;
1017 if (!dma->buflist) return;
1019 i810_flush_queue(dev);
1021 for (i = 0; i < dma->buf_count; i++) {
1022 drm_buf_t *buf = dma->buflist[ i ];
1023 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
1025 if (buf->pid == pid && buf_priv) {
1026 int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
1029 if (used == I810_BUF_CLIENT)
1030 DRM_DEBUG("reclaimed from client\n");
1031 if(buf_priv->currently_mapped == I810_BUF_MAPPED)
1032 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
/* ioctl: flush the DMA queue.  Requires the caller to hold the hardware
 * lock (standard DRM ioctl precondition). */
1037 int i810_flush_ioctl(struct inode *inode, struct file *filp,
1038 unsigned int cmd, unsigned long arg)
1040 drm_file_t *priv = filp->private_data;
1041 drm_device_t *dev = priv->dev;
1043 DRM_DEBUG("i810_flush_ioctl\n");
1044 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1045 DRM_ERROR("i810_flush_ioctl called without lock held\n");
1049 i810_flush_queue(dev);
/* ioctl: dispatch a client vertex buffer.  Copies the request from
 * userspace, requires the hardware lock, then hands the named buffer to
 * i810_dma_dispatch_vertex() and updates the SAREA dispatch bookkeeping.
 *
 * FIX(review): vertex.idx comes straight from userspace but was used to
 * index dma->buflist unchecked, allowing an out-of-bounds kernel read;
 * added a range check (the same check later kernels apply here). */
1054 int i810_dma_vertex(struct inode *inode, struct file *filp,
1055 unsigned int cmd, unsigned long arg)
1057 drm_file_t *priv = filp->private_data;
1058 drm_device_t *dev = priv->dev;
1059 drm_device_dma_t *dma = dev->dma;
1060 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1061 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1062 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1063 dev_priv->sarea_priv;
1064 drm_i810_vertex_t vertex;
1066 if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
1069 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1070 DRM_ERROR("i810_dma_vertex called without lock held\n");
/* Bounds-check the untrusted buffer index before touching buflist. */
if (vertex.idx < 0 || vertex.idx >= dma->buf_count)
return -EINVAL;
1074 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
1075 vertex.idx, vertex.used, vertex.discard);
1077 i810_dma_dispatch_vertex( dev,
1078 dma->buflist[ vertex.idx ],
1079 vertex.discard, vertex.used );
1081 atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]);
1082 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1083 sarea_priv->last_enqueue = dev_priv->counter-1;
/* Status page dword 5 is the driver counter (see i810_print_status_page). */
1084 sarea_priv->last_dispatch = (int) hw_status[5];
/* ioctl: clear front/back/depth buffers.  Copies the request from
 * userspace, requires the hardware lock, then delegates to
 * i810_dma_dispatch_clear(). */
1091 int i810_clear_bufs(struct inode *inode, struct file *filp,
1092 unsigned int cmd, unsigned long arg)
1094 drm_file_t *priv = filp->private_data;
1095 drm_device_t *dev = priv->dev;
1096 drm_i810_clear_t clear;
1098 if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
1101 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1102 DRM_ERROR("i810_clear_bufs called without lock held\n");
1106 i810_dma_dispatch_clear( dev, clear.flags,
1108 clear.clear_depth );
/* ioctl: swap front/back buffers.  Requires the hardware lock, then
 * delegates to i810_dma_dispatch_swap(). */
1112 int i810_swap_bufs(struct inode *inode, struct file *filp,
1113 unsigned int cmd, unsigned long arg)
1115 drm_file_t *priv = filp->private_data;
1116 drm_device_t *dev = priv->dev;
1118 DRM_DEBUG("i810_swap_bufs\n");
1120 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1121 DRM_ERROR("i810_swap_buf called without lock held\n");
1125 i810_dma_dispatch_swap( dev );
/* ioctl: refresh the SAREA's last_dispatch age from the hardware status
 * page (dword 5 is the driver counter the ring stores on completion). */
1129 int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1132 drm_file_t *priv = filp->private_data;
1133 drm_device_t *dev = priv->dev;
1134 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1135 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1136 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1137 dev_priv->sarea_priv;
1139 sarea_priv->last_dispatch = (int) hw_status[5];
/* ioctl: get a DMA buffer for the caller.  Copies the request in, requires
 * the hardware lock, delegates to i810_dma_get_buffer() (which claims and
 * mmaps a free buffer), and copies the result back to userspace. */
1143 int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
1146 drm_file_t *priv = filp->private_data;
1147 drm_device_t *dev = priv->dev;
1150 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1151 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1152 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1153 dev_priv->sarea_priv;
1155 DRM_DEBUG("getbuf\n");
1156 if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
1159 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1160 DRM_ERROR("i810_dma called without lock held\n");
1166 retcode = i810_dma_get_buffer(dev, &d, filp);
1168 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1169 current->pid, retcode, d.granted);
1171 if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
1173 sarea_priv->last_dispatch = (int) hw_status[5];
/* ioctl: copy user data into an already-mapped DMA buffer.  Requires the
 * hardware lock; the target buffer must be MAPPED.
 *
 * FIX(review): the index check used "> dma->buf_count", which lets a
 * userspace-supplied d.idx equal to buf_count read one element past the
 * end of buflist; corrected to ">=".
 * NOTE(review): d.used is also user-controlled and is not validated
 * against buf->total here -- flagged for follow-up, left unchanged since
 * the drm_i810_copy_t contract is not visible in this chunk. */
1178 int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd,
1181 drm_file_t *priv = filp->private_data;
1182 drm_device_t *dev = priv->dev;
1184 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1185 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1186 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1187 dev_priv->sarea_priv;
1189 drm_i810_buf_priv_t *buf_priv;
1190 drm_device_dma_t *dma = dev->dma;
1192 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1193 DRM_ERROR("i810_dma called without lock held\n");
1197 if (copy_from_user(&d, (drm_i810_copy_t *)arg, sizeof(d)))
1200 if(d.idx >= dma->buf_count) return -EINVAL;
1201 buf = dma->buflist[ d.idx ];
1202 buf_priv = buf->dev_private;
1203 if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM;
1205 if (copy_from_user(buf_priv->virtual, d.address, d.used))
1208 sarea_priv->last_dispatch = (int) hw_status[5];
1213 int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
1216 if(VM_DONTCOPY == 0) return 1;