/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

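/* PCI device IDs that this file treats as 965-class parts; the set
 * matters below because i915_emit_box() must emit the 965-specific
 * four-dword DRAWRECT_INFO packet on these chips.
 */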
#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
		       dev->pci_device == 0x2982 || \
		       dev->pci_device == 0x2992 || \
		       dev->pci_device == 0x29A2)

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		/* Reset the timeout whenever the head advances, so EBUSY
		 * is returned only for a genuinely stalled ring. */
		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
		DRM_UDELAY(1);
	}

	return DRM_ERR(EBUSY);
}

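/* The ring may also have been written by another agent (e.g. a
 * user-space client holding the lock), so re-read the hardware head
 * and tail before the kernel emits anything of its own.
 */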
void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

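/* Release everything i915_initialize() set up: the ring mapping, the
 * hardware status page, and the driver-private structure itself.
 */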
static int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

		if (dev_priv->status_page_dmah) {
			drm_pci_free(dev, dev_priv->status_page_dmah);
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		drm_free(dev->dev_private, sizeof(drm_i915_private_t),
			 DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}

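/* First-time setup from the I915_INIT_DMA ioctl: locate the SAREA and
 * register maps, ioremap the ring buffer, and point the hardware at a
 * freshly allocated status page.  On any failure, dev_private is set
 * first so that i915_dma_cleanup() can undo the partial init.
 */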
static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
						   0xffffffff);

	if (!dev_priv->status_page_dmah) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");
	dev->dev_private = (void *)dev_priv;
#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}

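/* Resume path (I915_RESUME_DMA): the mappings and status page must
 * already exist; just sanity-check them and rewrite the hardware
 * status page address register.
 */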
static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

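/* Multiplex the I915_INIT ioctl onto initialize/cleanup/resume. */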
static int i915_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv;
	drm_i915_init_t init;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
				 sizeof(init));

	switch (init.func) {
	case I915_INIT_DMA:
		dev_priv = drm_alloc(sizeof(drm_i915_private_t),
				     DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
		retcode = i915_initialize(dev, dev_priv, &init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = DRM_ERR(EINVAL);
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);
/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */
	return ret;
}

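/* Copy a user command stream into the low-priority ring, validating
 * each packet header with validate_cmd() before trusting its length.
 * Illustrative example (not from the original source): a header of
 * 0x54000004 has type 0x2 (2D), so do_validate_cmd() returns
 * (0x04 & 0xff) + 2 = 6, i.e. six dwords including the header.
 */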
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords + 1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return DRM_ERR(EINVAL);

	BEGIN_LP_RING((dwords + 1) & ~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return DRM_ERR(EINVAL);

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

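/* Emit one user cliprect as a DRAWRECT_INFO packet, rejecting empty or
 * inverted boxes.  965-class chips take the four-dword packet without
 * DR1; older parts take the six-dword form.
 */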
static int i915_emit_box(drm_device_t * dev,
			 drm_clip_rect_t __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return DRM_ERR(EFAULT);
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return DRM_ERR(EINVAL);
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */

static void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, dev_priv->counter);
#endif
}

int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = CMD_MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;
	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}

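/* Dispatch a validated user command buffer once per cliprect (or once
 * if no cliprects were supplied), then emit a breadcrumb so completion
 * can be tracked via the status page.
 */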
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}

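/* Queue an asynchronous page flip: flush the map cache, emit a
 * DISPLAYBUFFER_INFO packet pointing plane A at the other buffer, wait
 * for the flip to complete, then store a new breadcrumb.
 */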
static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, dev_priv->counter);
#endif
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);
	return i915_quiescent(dev);
}

static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
				 sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
						       batch.num_cliprects *
						       sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);

	ret = i915_dispatch_batchbuffer(dev, &batch);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}

static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
				 sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return DRM_ERR(EFAULT);
	}

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}

static int i915_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);
	if (dev_priv->current_page != 0)
		i915_dispatch_flip(dev);

	return 0;
}

static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);
	LOCK_TEST_WITH_RETURN(dev, filp);
	return i915_dispatch_flip(dev);
}

static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}

static int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}

drm_i915_mmio_entry_t mmio_table[] = {
	[MMIO_REGS_PS_DEPTH_COUNT] = {
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE, 0x2350, 8
	}
};

static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);

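/* Table-driven register access for user space: mmio.reg indexes
 * mmio_table, whose flags say whether the register may be read and/or
 * written, so only whitelisted registers are ever touched.
 */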
static int i915_mmio(DRM_IOCTL_ARGS)
{
	char buf[32];
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mmio_entry_t *e;
	drm_i915_mmio_t mmio;
	void __iomem *base;
	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}
	DRM_COPY_FROM_USER_IOCTL(mmio, (drm_i915_mmio_t __user *) data,
				 sizeof(mmio));

	if (mmio.reg >= mmio_table_size)
		return DRM_ERR(EINVAL);

	e = &mmio_table[mmio.reg];
	base = dev_priv->mmio_map->handle + e->offset;

	switch (mmio.read_write) {
	case I915_MMIO_READ:
		if (!(e->flag & I915_MMIO_MAY_READ))
			return DRM_ERR(EINVAL);
		memcpy_fromio(buf, base, e->size);
		if (DRM_COPY_TO_USER(mmio.data, buf, e->size)) {
			DRM_ERROR("DRM_COPY_TO_USER failed\n");
			return DRM_ERR(EFAULT);
		}
		break;

	case I915_MMIO_WRITE:
		if (!(e->flag & I915_MMIO_MAY_WRITE))
			return DRM_ERR(EINVAL);
		if (DRM_COPY_FROM_USER(buf, mmio.data, e->size)) {
			DRM_ERROR("DRM_COPY_FROM_USER failed\n");
			return DRM_ERR(EFAULT);
		}
		memcpy_toio(base, buf, e->size);
		break;
	}
	return 0;
}

int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}

void i915_driver_lastclose(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown(&(dev_priv->agp_heap));
	}
	i915_dma_cleanup(dev);
}

void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
			i915_do_cleanup_pageflip(dev);
		}
		i915_mem_release(dev, filp, dev_priv->agp_heap);
	}
}

drm_ioctl_desc_t i915_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = {i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = {i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = {i915_vblank_pipe_get, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_MMIO)] = {i915_mmio, DRM_AUTH},
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

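/* Illustrative user-space call sequence (not part of this driver); the
 * wrapper is libdrm's drmCommandWriteRead(), assumed here for the
 * example:
 *
 *	drm_i915_getparam_t gp;
 *	int value;
 *
 *	gp.param = I915_PARAM_LAST_DISPATCH;
 *	gp.value = &value;
 *	drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
 */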
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}