/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
		       dev->pci_device == 0x2982 || \
		       dev->pci_device == 0x2992 || \
		       dev->pci_device == 0x29A2 || \
		       dev->pci_device == 0x2A02)
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;

		DRM_UDELAY(1);
	}

	return DRM_ERR(EBUSY);
}
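/* Re-read the hardware ring registers and refresh the driver's cached
 * head/tail/free-space values, e.g. after another context has touched
 * the ring.
 */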
void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
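/* Undo i915_initialize(): disable interrupts, unmap the ring, release the
 * hardware status page and free dev_private.
 */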
static int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

		if (dev_priv->status_page_dmah) {
			drm_pci_free(dev, dev_priv->status_page_dmah);
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		drm_free(dev->dev_private, sizeof(drm_i915_private_t),
			 DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}
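/* One-time DMA setup: locate the SAREA and MMIO maps registered by the X
 * server, map the ring buffer and install the hardware status page.  On
 * any failure the partial state is torn down again via i915_dma_cleanup().
 */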
static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
						   0xffffffff);

	if (!dev_priv->status_page_dmah) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");
	dev->dev_private = (void *)dev_priv;
#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}
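/* Companion to i915_initialize() used on resume: verify that the maps and
 * status page are still in place and re-point the hardware at the status
 * page, without reallocating anything.
 */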
static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
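/* DRM_I915_INIT ioctl: dispatch on init.func to initialize, clean up or
 * resume the DMA machinery.
 */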
static int i915_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv;
	drm_i915_init_t init;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
				 sizeof(init));

	switch (init.func) {
	case I915_INIT_DMA:
		dev_priv = drm_alloc(sizeof(drm_i915_private_t),
				     DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
		retcode = i915_initialize(dev, dev_priv, &init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = DRM_ERR(EINVAL);
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
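/* Worked example (illustrative only): a dword whose client field,
 * (cmd >> 29) & 0x7, is 0x2 is a 2d command, so its size is
 * (cmd & 0xff) + 2 dwords.  With a low byte of 0x04 that yields 6, so the
 * next instruction to validate starts 6 dwords later.  A return value of
 * zero aborts the whole buffer.
 */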
static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}
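/* Copy a user-supplied command stream into the low-priority ring,
 * validating each instruction with validate_cmd() before emitting it.
 */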
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return DRM_ERR(EINVAL);

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return DRM_ERR(EINVAL);

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
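/* Emit a GFX_OP_DRAWRECT_INFO packet for cliprect i after sanity-checking
 * the rectangle; i965 uses a shorter variant of the packet.
 */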
static int i915_emit_box(drm_device_t * dev,
			 drm_clip_rect_t __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return DRM_ERR(EFAULT);
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return DRM_ERR(EINVAL);
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */
static void i915_emit_breadcrumb(drm_device_t *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
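/* Emit an MI_FLUSH with the caller's flush bits OR'd in; the trailing
 * zero dwords are MI_NOOP padding.
 */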
int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = CMD_MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}
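/* Dispatch a validated command buffer once per cliprect (or once if there
 * are none), emitting the drawing rectangle before each pass.
 */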
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb( dev );
	return 0;
}
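/* Dispatch a batch buffer once per cliprect, either by reference with
 * MI_BATCH_BUFFER_START or, when that is disabled, by wrapping it in an
 * MI_BATCH_BUFFER packet that carries explicit start and end addresses.
 */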
static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb( dev );

	return 0;
}
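/* Queue an asynchronous flip between the front and back buffers, make the
 * ring wait for the plane A flip event, and store a new breadcrumb counter
 * so clients can tell when the flip was enqueued.
 */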
static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
#ifdef I915_HAVE_FENCE
	drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
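/* Wait until the ring is effectively empty: all but the last 8 bytes of
 * the ring must be free.
 */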
static int i915_quiescent(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}
static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_quiescent(dev);
}
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
				 sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch.start, batch.used, batch.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
						       batch.num_cliprects *
						       sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);

	ret = i915_dispatch_batchbuffer(dev, &batch);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
				 sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return DRM_ERR(EFAULT);
	}

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}
static int i915_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);
	if (dev_priv->current_page != 0)
		i915_dispatch_flip(dev);

	return 0;
}
static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	return i915_dispatch_flip(dev);
}
static int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}
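/* For illustration only (not part of the original file): user space
 * typically reaches this ioctl through libdrm.  A sketch, assuming a
 * libdrm that provides drmCommandWriteRead() and the i915 command index
 * DRM_I915_GETPARAM:
 *
 *	drm_i915_getparam_t gp;
 *	int value, ret;
 *
 *	gp.param = I915_PARAM_ALLOW_BATCHBUFFER;
 *	gp.value = &value;
 *	ret = drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
 *
 * On success (ret == 0), value is 1 if the batchbuffer ioctl is enabled.
 */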
static int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}
drm_i915_mmio_entry_t mmio_table[] = {
	[MMIO_REGS_PS_DEPTH_COUNT] = {
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x2350,
		8
	}
};

static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
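/* DRM_I915_MMIO ioctl: read or write one of the whitelisted registers in
 * mmio_table on behalf of userspace, bouncing the data through a kernel
 * buffer.
 */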
static int i915_mmio(DRM_IOCTL_ARGS)
{
	char buf[32];
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mmio_entry_t *e;
	drm_i915_mmio_t mmio;
	void __iomem *base;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}
	DRM_COPY_FROM_USER_IOCTL(mmio, (drm_i915_mmio_t __user *) data,
				 sizeof(mmio));

	if (mmio.reg >= mmio_table_size)
		return DRM_ERR(EINVAL);

	e = &mmio_table[mmio.reg];
	base = dev_priv->mmio_map->handle + e->offset;

	switch (mmio.read_write) {
	case I915_MMIO_READ:
		if (!(e->flag & I915_MMIO_MAY_READ))
			return DRM_ERR(EINVAL);
		memcpy_fromio(buf, base, e->size);
		if (DRM_COPY_TO_USER(mmio.data, buf, e->size)) {
			DRM_ERROR("DRM_COPY_TO_USER failed\n");
			return DRM_ERR(EFAULT);
		}
		break;

	case I915_MMIO_WRITE:
		if (!(e->flag & I915_MMIO_MAY_WRITE))
			return DRM_ERR(EINVAL);
		if (DRM_COPY_FROM_USER(buf, mmio.data, e->size)) {
			DRM_ERROR("DRM_COPY_FROM_USER failed\n");
			return DRM_ERR(EFAULT);
		}
		memcpy_toio(base, buf, e->size);
		break;
	}
	return 0;
}
int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}
void i915_driver_lastclose(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown(&(dev_priv->agp_heap));
	}
	i915_dma_cleanup(dev);
}
void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
			i915_do_cleanup_pageflip(dev);
		}
		i915_mem_release(dev, filp, dev_priv->agp_heap);
	}
}
drm_ioctl_desc_t i915_ioctls[] = {
	[DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = {i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = {i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = {i915_vblank_pipe_get, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_I915_MMIO)] = {i915_mmio, DRM_AUTH},
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCIe.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(drm_device_t * dev)
{
	return 1;
}