/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
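/* Chipset identification by PCI device ID.  The 965-class and G33-class
 * devices are called out separately because a few paths below treat them
 * differently (e.g. DRAWRECT emission and hardware status page setup).
 */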
#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
		       dev->pci_device == 0x2982 || \
		       dev->pci_device == 0x2992 || \
		       dev->pci_device == 0x29A2 || \
		       dev->pci_device == 0x2A02 || \
		       dev->pci_device == 0x2A12)

#define IS_G33(dev)   (dev->pci_device == 0x29C2 || \
		       dev->pci_device == 0x29B2 || \
		       dev->pci_device == 0x29D2)
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
		DRM_UDELAY(1);
	}

	return DRM_ERR(EBUSY);
}
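/* Resynchronize the software copy of the ring state (head, tail, free
 * space) with the hardware ring registers after the kernel has lost
 * track of the ring contents.
 */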
void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
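/* Tear down the DMA state set up by i915_initialize(): unmap the ring
 * buffer, release the hardware status page and free dev_private.
 */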
static int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

		if (dev_priv->status_page_dmah) {
			drm_pci_free(dev, dev_priv->status_page_dmah);
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		if (dev_priv->status_gfx_addr) {
			dev_priv->status_gfx_addr = 0;
			drm_core_ioremapfree(&dev_priv->hws_map, dev);
			I915_WRITE(0x02080, 0x1ffff000);
		}

		drm_free(dev->dev_private, sizeof(drm_i915_private_t),
			 DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}
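/* Set up the ring buffer mapping, SAREA pointers and hardware status page
 * from the parameters passed in from userspace via the I915_INIT_DMA ioctl.
 */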
static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->sarea_priv->pf_current_page = 0;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Enable vblank on pipe A for older X servers
	 */
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
	    drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

	if (!dev_priv->status_page_dmah) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	I915_WRITE(0x02080, dev_priv->dma_status_page);

	DRM_DEBUG("Enabled hardware status page\n");
	dev->dev_private = (void *)dev_priv;
	return 0;
}
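/* Re-enable DMA after suspend.  The mappings created by i915_initialize()
 * must still be in place; only the hardware status page is reprogrammed.
 */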
static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	else
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
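/* DRM_I915_INIT ioctl: dispatch to initialize, cleanup or resume
 * depending on the requested function.
 */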
static int i915_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv;
	drm_i915_init_t init;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data, sizeof(init));

	switch (init.func) {
	case I915_INIT_DMA:
		dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
		retcode = i915_initialize(dev, dev_priv, &init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = DRM_ERR(EINVAL);
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);
/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */
	return ret;
}
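/* Copy a user-supplied stream of commands into the low-priority ring,
 * validating each instruction with validate_cmd() before it is emitted.
 */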
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return DRM_ERR(EINVAL);

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return DRM_ERR(EINVAL);

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
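/* Emit a single cliprect as a DRAWRECT_INFO packet, using the 965 variant
 * of the packet on 965-class hardware.
 */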
static int i915_emit_box(drm_device_t * dev,
			 drm_clip_rect_t __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return DRM_ERR(EFAULT);
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return DRM_ERR(EINVAL);
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */
void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	if (++dev_priv->counter > BREADCRUMB_MASK) {
		dev_priv->counter = 1;
		DRM_DEBUG("Breadcrumb counter wrapped around\n");
	}

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = CMD_MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}
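/* Execute a user command buffer once per cliprect (or once if there are
 * none), then emit a breadcrumb so completion can be tracked.
 */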
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
#ifdef I915_HAVE_FENCE
	drm_i915_private_t *dev_priv = dev->dev_private;
#endif
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}
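/* Kick off a user batchbuffer, either with MI_BATCH_BUFFER_START or with
 * an explicit MI_BATCH_BUFFER start/end pair, once per cliprect.
 */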
static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}
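/* Flip a single pipe to the next page in the front/back(/third) buffer
 * cycle and update the per-pipe page-flip state kept in the SAREA.
 */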
static void i915_do_dispatch_flip(drm_device_t * dev, int pipe, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 num_pages, current_page, next_page, dspbase;
	int shift = 2 * pipe, x, y;
	RING_LOCALS;

	/* Calculate display base offset */
	num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
	current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
	next_page = (current_page + 1) % num_pages;

	switch (next_page) {
	default:
	case 0:
		dspbase = dev_priv->sarea_priv->front_offset;
		break;
	case 1:
		dspbase = dev_priv->sarea_priv->back_offset;
		break;
	case 2:
		dspbase = dev_priv->sarea_priv->third_offset;
		break;
	}

	if (pipe == 0) {
		x = dev_priv->sarea_priv->pipeA_x;
		y = dev_priv->sarea_priv->pipeA_y;
	} else {
		x = dev_priv->sarea_priv->pipeB_x;
		y = dev_priv->sarea_priv->pipeB_y;
	}

	dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;

	DRM_DEBUG("pipe=%d current_page=%d dspbase=0x%x\n", pipe, current_page,
		  dspbase);

	BEGIN_LP_RING(4);
	OUT_RING(sync ? 0 :
		 (MI_WAIT_FOR_EVENT | (pipe ? MI_WAIT_FOR_PLANE_B_FLIP :
				       MI_WAIT_FOR_PLANE_A_FLIP)));
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
		 (pipe ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
	OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
	OUT_RING(dspbase);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
	dev_priv->sarea_priv->pf_current_page |= next_page << shift;
}
void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	DRM_DEBUG("%s: pipes=0x%x pfCurrentPage=%d\n",
		  __FUNCTION__,
		  pipes, dev_priv->sarea_priv->pf_current_page);

	i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);

	for (i = 0; i < 2; i++)
		if (pipes & (1 << i))
			i915_do_dispatch_flip(dev, i, sync);

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	if (!sync)
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
static int i915_quiescent(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}
static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	LOCK_TEST_WITH_RETURN(dev, filp);
	return i915_quiescent(dev);
}
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
				 sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
						       batch.num_cliprects *
						       sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);

	ret = i915_dispatch_batchbuffer(dev, &batch);

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
				 sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return DRM_ERR(EFAULT);
	}

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return 0;
}
static int i915_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, pipes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;

	DRM_DEBUG("%s\n", __FUNCTION__);

	for (i = 0, pipes = 0; i < 2; i++)
		if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
			dev_priv->sarea_priv->pf_current_page =
			    (dev_priv->sarea_priv->pf_current_page &
			     ~(0x3 << (2 * i))) | (num_pages - 1) << (2 * i);

			pipes |= 1 << i;
		}

	if (pipes)
		i915_dispatch_flip(dev, pipes, 0);

	return 0;
}
static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_flip_t param;

	DRM_DEBUG("%s\n", __FUNCTION__);
	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_flip_t __user *) data,
				 sizeof(param));

	if (param.pipes & ~0x3) {
		DRM_ERROR("Invalid pipes 0x%x, only <= 0x3 is valid\n",
			  param.pipes);
		return DRM_ERR(EINVAL);
	}

	i915_dispatch_flip(dev, param.pipes, 0);
	return 0;
}
static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}
static int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}
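/* Table of MMIO registers that userspace may access through the
 * DRM_I915_MMIO ioctl, together with the access modes allowed for each.
 */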
drm_i915_mmio_entry_t mmio_table[] = {
	[MMIO_REGS_PS_DEPTH_COUNT] = {
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x2350,
		8
	}
};

static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
static int i915_mmio(DRM_IOCTL_ARGS)
{
	char buf[32];
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mmio_entry_t *e;
	drm_i915_mmio_t mmio;
	void __iomem *base;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(mmio, (drm_i915_mmio_t __user *) data,
				 sizeof(mmio));

	if (mmio.reg >= mmio_table_size)
		return DRM_ERR(EINVAL);

	e = &mmio_table[mmio.reg];
	base = (u8 *) dev_priv->mmio_map->handle + e->offset;

	switch (mmio.read_write) {
	case I915_MMIO_READ:
		if (!(e->flag & I915_MMIO_MAY_READ))
			return DRM_ERR(EINVAL);
		memcpy_fromio(buf, base, e->size);
		if (DRM_COPY_TO_USER(mmio.data, buf, e->size)) {
			DRM_ERROR("DRM_COPY_TO_USER failed\n");
			return DRM_ERR(EFAULT);
		}
		break;
	case I915_MMIO_WRITE:
		if (!(e->flag & I915_MMIO_MAY_WRITE))
			return DRM_ERR(EINVAL);
		if (DRM_COPY_FROM_USER(buf, mmio.data, e->size)) {
			DRM_ERROR("DRM_COPY_FROM_USER failed\n");
			return DRM_ERR(EFAULT);
		}
		memcpy_toio(base, buf, e->size);
		break;
	}

	return 0;
}
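/* DRM_I915_HWS_ADDR ioctl: place the hardware status page at a supplied
 * offset in graphics memory (as G33-class hardware requires) instead of
 * the PCI page allocated in i915_initialize().
 */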
static int i915_set_status_page(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t hws;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data,
				 sizeof(hws));

	printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr);

	dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return DRM_ERR(ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
		  dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);

	return 0;
}
int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}
void i915_driver_lastclose(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_do_cleanup_pageflip(dev);
		i915_mem_takedown(&(dev_priv->agp_heap));
	}

	i915_dma_cleanup(dev);
}
void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_release(dev, filp, dev_priv->agp_heap);
	}
}
drm_ioctl_desc_t i915_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = {i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = {i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = {i915_vblank_pipe_get, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_MMIO)] = {i915_mmio, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] = {i915_set_status_page, DRM_AUTH},
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 * \returns A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}
int i915_driver_firstopen(struct drm_device *dev)
{
#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}