/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Reset the timeout whenever the GPU makes progress. */
		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
		DRM_UDELAY(1);
	}

	return -EBUSY;
}

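/* A note on the ring arithmetic above: head is where the GPU reads and
 * tail is where the CPU writes, both byte offsets into the ring.  Free
 * space is head - (tail + 8), wrapped by Size when negative; the 8-byte
 * pad keeps tail from ever catching up with head, since tail == head
 * reads as "empty" to the hardware.  E.g. with Size = 64KB, head = 0x100
 * and tail = 0xff00: 0x100 - 0xff08 + 0x10000 = 0x1f8 bytes free.
 */
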
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	/* The hardware may have been used behind our back (e.g. by the
	 * console); resync the software copies of head and tail.
	 */
	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
	}

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
		/* Need to rewrite hardware status page (HWS_PGA, 0x02080)
		 * so the chip stops writing into the freed page. */
		I915_WRITE(0x02080, 0x1ffff000);
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
		I915_WRITE(0x02080, 0x1ffff000);
	}

	return 0;
}

#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
#define DRI2_SAREA_BLOCK_NEXT(p)				\
	((void *) ((unsigned char *) (p) +			\
		   DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p)))

#define DRI2_SAREA_BLOCK_END		0x0000
#define DRI2_SAREA_BLOCK_LOCK		0x0001
#define DRI2_SAREA_BLOCK_EVENT_BUFFER	0x0002

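/* Layout implied by the macros above: the DRI2 sarea is a sequence of
 * blocks, each starting with a 32-bit header whose high 16 bits are the
 * block type and whose low 16 bits are the total block size in bytes, so
 * a walker can skip block types it does not understand.  A zero header
 * terminates the list; e.g. 0x00010010 would introduce a 16-byte lock
 * block.
 */
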
static int
setup_dri2_sarea(struct drm_device * dev,
		 struct drm_file *file_priv,
		 drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	unsigned int *p, *end, *next;

	mutex_lock(&dev->struct_mutex);
	dev_priv->sarea_bo =
		drm_lookup_buffer_object(file_priv,
					 init->sarea_handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!dev_priv->sarea_bo) {
		DRM_ERROR("did not find sarea bo\n");
		return -EINVAL;
	}

	ret = drm_bo_kmap(dev_priv->sarea_bo, 0,
			  dev_priv->sarea_bo->num_pages,
			  &dev_priv->sarea_kmap);
	if (ret) {
		DRM_ERROR("could not map sarea bo\n");
		return ret;
	}

	p = dev_priv->sarea_kmap.virtual;
	end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT);
	while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) {
		switch (DRI2_SAREA_BLOCK_TYPE(*p)) {
		case DRI2_SAREA_BLOCK_LOCK:
			dev->lock.hw_lock = (void *) (p + 1);
			dev->sigdata.lock = dev->lock.hw_lock;
			break;
		}
		next = DRI2_SAREA_BLOCK_NEXT(p);
		if (next <= p || end < next) {
			DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n",
				  next, p, end);
			return -EINVAL;
		}
		p = next;
	}

	return 0;
}

static int i915_initialize(struct drm_device * dev,
			   struct drm_file *file_priv,
			   drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		i915_dma_cleanup(dev);
		return -EINVAL;
	}

	if (init->mmio_offset != 0)
		dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

#ifdef I915_HAVE_BUFFER
	dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
#endif

	if (init->sarea_priv_offset)
		dev_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *) dev_priv->sarea->handle +
			 init->sarea_priv_offset);
	else {
		/* No sarea_priv for you! */
		dev_priv->sarea_priv = NULL;
	}

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	/* The ring size must be a power of two for this mask to work. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->pf_current_page = 0;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;
	if (IS_I965G(dev)) /* 965 doesn't support older method */
		dev_priv->use_mi_batchbuffer_start = 1;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Enable vblank on pipe A for older X servers
	 */
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

	if (!dev_priv->status_page_dmah) {
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	I915_WRITE(0x02080, dev_priv->dma_status_page);

	DRM_DEBUG("Enabled hardware status page\n");
#ifdef I915_HAVE_BUFFER
	mutex_init(&dev_priv->cmdbuf_mutex);
#endif

	if (init->func == I915_INIT_DMA2) {
		ret = setup_dri2_sarea(dev, file_priv, init);
		if (ret) {
			i915_dma_cleanup(dev);
			DRM_ERROR("could not set up dri2 sarea\n");
			return ret;
		}
	}

	return 0;
}

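/* The hardware status page set up above is a PCI-coherent system page
 * the GPU can write without CPU involvement; this driver's main use for
 * it is the breadcrumb sequence counter written by i915_emit_breadcrumb()
 * below, which lets the CPU poll command-stream progress without MMIO
 * reads.
 */
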
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	else
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
	case I915_INIT_DMA2:
		retcode = i915_initialize(dev, file_priv, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

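/* Decoding notes for do_validate_cmd(): bits 31:29 of a dword select the
 * command client (0 = MI, 2 = 2D blitter, 3 = 3D/render), and the length
 * field of a variable-size command counts the dwords that follow, which
 * is why the function returns "(cmd & mask) + 2"-style totals: the value
 * is the instruction's full footprint in dwords.
 */
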
static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
			  int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

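/* The LP ring macros used above work as a trio: BEGIN_LP_RING(n)
 * reserves n dwords on the low-priority ring (waiting via
 * i915_wait_ring() if there is not enough space), OUT_RING() writes one
 * dword at the current tail, and ADVANCE_LP_RING() publishes the new
 * tail register to the hardware.  The (dwords + 1) & ~1 rounding keeps
 * the tail qword-aligned, padding with a single zero (MI_NOOP) dword
 * when the count is odd.
 */
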
static int i915_emit_box(struct drm_device * dev,
			 struct drm_clip_rect __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit.  For now, do it in both places:
 */
void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	if (++dev_priv->counter > BREADCRUMB_MASK) {
		dev_priv->counter = 1;
		DRM_DEBUG("Breadcrumb counter wrapped around\n");
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}

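/* The breadcrumb is a monotonically increasing sequence number that the
 * GPU stores into the hardware status page (at the dword index emitted
 * above) as it passes this point in the ring; READ_BREADCRUMB() then
 * tells the CPU how far the GPU has actually progressed.
 */
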
int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = CMD_MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
#ifdef I915_HAVE_FENCE
	drm_i915_private_t *dev_priv = dev->dev_private;
#endif
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	if (unlikely((dev_priv->counter & 0xFF) == 0))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	if (unlikely((dev_priv->counter & 0xFF) == 0))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}

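/* MI_BATCH_NON_SECURE asks the hardware to apply its own command checks
 * to the batch, which is why user-supplied batches can be chained from
 * the ring without running validate_cmd() over their contents.  On 965
 * the non-secure bit moved from the address dword into the command dword
 * (MI_BATCH_NON_SECURE_I965), hence the two encodings above.
 */
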
static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 num_pages, current_page, next_page, dspbase;
	int shift = 2 * plane, x, y;
	RING_LOCALS;

	/* Calculate display base offset */
	num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
	current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
	next_page = (current_page + 1) % num_pages;

	switch (next_page) {
	default:
	case 0:
		dspbase = dev_priv->sarea_priv->front_offset;
		break;
	case 1:
		dspbase = dev_priv->sarea_priv->back_offset;
		break;
	case 2:
		dspbase = dev_priv->sarea_priv->third_offset;
		break;
	}

	if (plane == 0) {
		x = dev_priv->sarea_priv->planeA_x;
		y = dev_priv->sarea_priv->planeA_y;
	} else {
		x = dev_priv->sarea_priv->planeB_x;
		y = dev_priv->sarea_priv->planeB_y;
	}

	dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;

	DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
		  dspbase);

	BEGIN_LP_RING(4);
	OUT_RING(sync ? 0 :
		 (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
				       MI_WAIT_FOR_PLANE_A_FLIP)));
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
		 (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
	OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
	OUT_RING(dspbase);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
	dev_priv->sarea_priv->pf_current_page |= next_page << shift;
}

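/* pf_current_page packs the current page of each display plane into two
 * bits per plane (shift = 2 * plane), so with a third buffer present the
 * value cycles 0 -> 1 -> 2 -> 0 independently for planes A and B.
 */
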
void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
		  planes, dev_priv->sarea_priv->pf_current_page);

	i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);

	for (i = 0; i < 2; i++)
		if (planes & (1 << i))
			i915_do_dispatch_flip(dev, i, sync);

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}

static int i915_quiescent(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_quiescent(dev);
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	ret = i915_dispatch_batchbuffer(dev, batch);

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	int ret;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
				cmdbuf->num_cliprects *
				sizeof(struct drm_clip_rect))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return -EFAULT;
	}

	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return 0;
}

#if DRM_DEBUG_CODE
#define DRM_DEBUG_RELOCATION	(drm_debug != 0)
#else
#define DRM_DEBUG_RELOCATION	0
#endif

#ifdef I915_HAVE_BUFFER

struct i915_relocatee_info {
	struct drm_buffer_object *buf;
	unsigned long offset;
	u32 *data_page;
	unsigned page_offset;
	struct drm_bo_kmap_obj kmap;
	int is_iomem;
};

struct drm_i915_validate_buffer {
	struct drm_buffer_object *buffer;
	struct drm_bo_info_rep rep;
	int presumed_offset_correct;
	void __user *data;
	int ret;
};

static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer *buffers,
					    unsigned num_buffers)
{
	while (num_buffers--)
		drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
}

int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
		     struct drm_i915_validate_buffer *buffers,
		     struct i915_relocatee_info *relocatee,
		     uint32_t *reloc)
{
	unsigned index;
	unsigned long new_cmd_offset;
	u32 val;
	int ret, i;
	int buf_index = -1;

	/*
	 * FIXME: O(relocs * buffers) complexity.
	 */

	for (i = 0; i <= num_buffers; i++)
		if (buffers[i].buffer)
			if (reloc[2] == buffers[i].buffer->base.hash.key)
				buf_index = i;

	if (buf_index == -1) {
		DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
		return -EINVAL;
	}

	/*
	 * Short-circuit relocations that were correctly
	 * guessed by the client
	 */
	if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
		return 0;

	new_cmd_offset = reloc[0];
	if (!relocatee->data_page ||
	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
		drm_bo_kunmap(&relocatee->kmap);
		relocatee->data_page = NULL;
		relocatee->offset = new_cmd_offset;
		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
				  1, &relocatee->kmap);
		if (ret) {
			DRM_ERROR("Could not map command buffer to apply relocs: %08lx\n",
				  new_cmd_offset);
			return ret;
		}
		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
						       &relocatee->is_iomem);
		relocatee->page_offset = (relocatee->offset & PAGE_MASK);
	}

	val = buffers[buf_index].buffer->offset;
	index = (reloc[0] - relocatee->page_offset) >> 2;

	/* add in validate */
	val = val + reloc[1];

	if (DRM_DEBUG_RELOCATION) {
		if (buffers[buf_index].presumed_offset_correct &&
		    relocatee->data_page[index] != val) {
			DRM_DEBUG("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
				  reloc[0], reloc[1], buf_index,
				  relocatee->data_page[index], val);
		}
	}

	if (relocatee->is_iomem)
		iowrite32(val, relocatee->data_page + index);
	else
		relocatee->data_page[index] = val;
	return 0;
}

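/* Each relocation consumed above is a triple of dwords:
 *	reloc[0]: byte offset of the dword to patch in the relocatee,
 *	reloc[1]: delta added to the target buffer's final offset,
 *	reloc[2]: handle (hash key) of the target buffer,
 * so the patched value is simply target->offset + delta.
 */
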
int i915_process_relocs(struct drm_file *file_priv,
			uint32_t buf_handle,
			uint32_t __user **reloc_user_ptr,
			struct i915_relocatee_info *relocatee,
			struct drm_i915_validate_buffer *buffers,
			uint32_t num_buffers)
{
	int ret, reloc_stride;
	uint32_t cur_offset;
	uint32_t reloc_count;
	uint32_t reloc_type;
	uint32_t reloc_buf_size;
	uint32_t *reloc_buf = NULL;
	int i;

	/* do a copy from user from the user ptr */
	ret = get_user(reloc_count, *reloc_user_ptr);
	if (ret) {
		DRM_ERROR("Could not map relocation buffer.\n");
		goto out;
	}

	ret = get_user(reloc_type, (*reloc_user_ptr)+1);
	if (ret) {
		DRM_ERROR("Could not map relocation buffer.\n");
		goto out;
	}

	if (reloc_type != 0) {
		DRM_ERROR("Unsupported relocation type requested\n");
		ret = -EINVAL;
		goto out;
	}

	reloc_buf_size = (I915_RELOC_HEADER + (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
	reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
	if (!reloc_buf) {
		DRM_ERROR("Out of memory for reloc buffer\n");
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
		ret = -EFAULT;
		goto out;
	}

	/* get next relocate buffer handle */
	*reloc_user_ptr = (uint32_t *)*(unsigned long *)&reloc_buf[2];

	reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */

	DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count, *reloc_user_ptr);

	for (i = 0; i < reloc_count; i++) {
		cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);

		ret = i915_apply_reloc(file_priv, num_buffers, buffers,
				       relocatee, reloc_buf + cur_offset);
		if (ret)
			goto out;
	}

out:
	if (reloc_buf)
		kfree(reloc_buf);

	if (relocatee->data_page) {
		drm_bo_kunmap(&relocatee->kmap);
		relocatee->data_page = NULL;
	}

	return ret;
}

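/* Sketch of a type-0 relocation buffer as consumed above (assuming the
 * usual I915_RELOC_HEADER == 4 and I915_RELOC0_STRIDE == 3 from
 * i915_drm.h):
 *
 *	dword 0:    relocation count
 *	dword 1:    relocation type (only 0 is supported here)
 *	dwords 2-3: user address of the next buffer in the chain, or 0
 *	then count records of 3 dwords each:
 *		{ offset to patch, delta, target buffer handle }
 *
 * The chain pointer is what lets i915_exec_reloc() below loop until
 * reloc_user_ptr goes NULL.
 */
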
static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
			   uint32_t __user *reloc_user_ptr,
			   struct drm_i915_validate_buffer *buffers,
			   uint32_t buf_count)
{
	struct drm_device *dev = file_priv->head->dev;
	struct i915_relocatee_info relocatee;
	int ret = 0;
	int b;

	/*
	 * Short circuit relocations when all previous
	 * buffers offsets were correctly guessed by
	 * the client
	 */
	if (!DRM_DEBUG_RELOCATION) {
		for (b = 0; b < buf_count; b++)
			if (!buffers[b].presumed_offset_correct)
				break;

		if (b == buf_count)
			return 0;
	}

	memset(&relocatee, 0, sizeof(relocatee));

	mutex_lock(&dev->struct_mutex);
	relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
	mutex_unlock(&dev->struct_mutex);
	if (!relocatee.buf) {
		DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
		ret = -EINVAL;
		goto out_err;
	}

	mutex_lock(&relocatee.buf->mutex);
	ret = drm_bo_wait(relocatee.buf, 0, 0, FALSE);
	if (ret)
		goto out_err1;

	while (reloc_user_ptr) {
		ret = i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, &relocatee, buffers, buf_count);
		if (ret) {
			DRM_ERROR("process relocs failed\n");
			break;
		}
	}

out_err1:
	mutex_unlock(&relocatee.buf->mutex);
	drm_bo_usage_deref_unlocked(&relocatee.buf);
out_err:
	return ret;
}

static int i915_check_presumed(struct drm_i915_op_arg *arg,
			       struct drm_buffer_object *bo,
			       uint32_t __user *data,
			       int *presumed_ok)
{
	struct drm_bo_op_req *req = &arg->d.req;
	uint32_t hint_offset;
	uint32_t hint = req->bo_req.hint;

	*presumed_ok = 0;

	if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
		return 0;
	if (bo->offset == req->bo_req.presumed_offset) {
		*presumed_ok = 1;
		return 0;
	}

	/*
	 * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
	 * the user-space IOCTL argument list, since the buffer has moved,
	 * we're about to apply relocations and we might subsequently
	 * hit an -EAGAIN. In that case the argument list will be reused by
	 * user-space, but the presumed offset is no longer valid.
	 *
	 * Needless to say, this is a bit ugly.
	 */

	hint_offset = (uint32_t *)&req->bo_req.hint - (uint32_t *)arg;
	hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
	return __put_user(hint, data + hint_offset);
}

/*
 * Validate, add fence and relocate a block of bos from a userspace list
 */
int i915_validate_buffer_list(struct drm_file *file_priv,
			      unsigned int fence_class, uint64_t data,
			      struct drm_i915_validate_buffer *buffers,
			      uint32_t *num_buffers)
{
	struct drm_i915_op_arg arg;
	struct drm_bo_op_req *req = &arg.d.req;
	int ret = 0;
	unsigned buf_count = 0;
	uint32_t buf_handle;
	uint32_t __user *reloc_user_ptr;
	struct drm_i915_validate_buffer *item = buffers;

	do {
		if (buf_count >= *num_buffers) {
			DRM_ERROR("Buffer count exceeded %d.\n", *num_buffers);
			ret = -EINVAL;
			goto out_err;
		}
		item = buffers + buf_count;
		item->buffer = NULL;
		item->presumed_offset_correct = 0;

		buffers[buf_count].buffer = NULL;

		if (copy_from_user(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
			ret = -EFAULT;
			goto out_err;
		}

		if (req->op != drm_bo_validate) {
			DRM_ERROR
			    ("Buffer object operation wasn't \"validate\".\n");
			ret = -EINVAL;
			goto out_err;
		}
		item->ret = 0;
		item->data = (void __user *) (unsigned long) data;

		buf_handle = req->bo_req.handle;
		reloc_user_ptr = (uint32_t *)(unsigned long)arg.reloc_ptr;

		if (reloc_user_ptr) {
			ret = i915_exec_reloc(file_priv, buf_handle, reloc_user_ptr, buffers, buf_count);
			if (ret)
				goto out_err;
			DRM_MEMORYBARRIER();
		}

		ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
					     req->bo_req.flags, req->bo_req.mask,
					     req->bo_req.hint,
					     req->bo_req.fence_class, 0,
					     &item->rep,
					     &item->buffer);
		if (ret) {
			DRM_ERROR("error on handle validate %d\n", ret);
			goto out_err;
		}

		buf_count++;

		ret = i915_check_presumed(&arg, item->buffer,
					  (uint32_t __user *)
					  (unsigned long) data,
					  &item->presumed_offset_correct);
		if (ret)
			goto out_err;

		data = arg.next;
	} while (data != 0);
out_err:
	*num_buffers = buf_count;
	item->ret = (ret != -EAGAIN) ? ret : 0;
	return ret;
}

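/* -EAGAIN deliberately isn't recorded in item->ret: user-space responds
 * to -EAGAIN by rerunning the ioctl with the same argument list, so only
 * real per-buffer errors need to be reported back.
 */
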
/*
 * Remove all buffers from the unfenced list.
 * If the execbuffer operation was aborted, for example due to a signal,
 * this also makes sure that buffers retain their original state and
 * fence pointers.
 * Copy back buffer information to user-space unless we were interrupted
 * by a signal, in which case the IOCTL must be rerun.
 */
static int i915_handle_copyback(struct drm_device *dev,
				struct drm_i915_validate_buffer *buffers,
				unsigned int num_buffers, int ret)
{
	int err = ret;
	int i;
	struct drm_i915_op_arg arg;

	if (ret)
		drm_putback_buffer_objects(dev);

	if (ret != -EAGAIN) {
		for (i = 0; i < num_buffers; ++i) {
			arg.handled = 1;
			arg.d.rep.ret = buffers->ret;
			arg.d.rep.bo_info = buffers->rep;
			if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
				err = -EFAULT;
			buffers++;
		}
	}

	return err;
}

/*
 * Create a fence object, and if that fails, pretend that everything is
 * OK and just idle the GPU.
 */
void i915_fence_or_sync(struct drm_file *file_priv,
			uint32_t fence_flags,
			struct drm_fence_arg *fence_arg,
			struct drm_fence_object **fence_p)
{
	struct drm_device *dev = file_priv->head->dev;
	int ret;
	struct drm_fence_object *fence;

	ret = drm_fence_buffer_objects(dev, NULL, fence_flags,
				       NULL, &fence);
	if (ret) {

		/*
		 * Fence creation failed.
		 * Fall back to synchronous operation and idle the engine.
		 */

		(void) i915_emit_mi_flush(dev, MI_READ_FLUSH);
		(void) i915_quiescent(dev);

		if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {

			/*
			 * Communicate to user-space that
			 * fence creation has failed and that
			 * the engine is idle.
			 */

			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}

		drm_putback_buffer_objects(dev);
		if (fence_p)
			*fence_p = NULL;
		return;
	}

	if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {

		ret = drm_fence_add_user_object(file_priv, fence,
						fence_flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (!ret)
			drm_fence_fill_arg(fence, fence_arg);
		else {
			/*
			 * Fence user object creation failed.
			 * We must idle the engine here as well, as user-
			 * space expects a fence object to wait on. Since we
			 * have a fence object we wait for it to signal
			 * to indicate engine "sufficiently" idle.
			 */

			(void) drm_fence_object_wait(fence, 0, 1,
						     fence->type);
			drm_fence_usage_deref_unlocked(&fence);
			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}
	}

	if (fence_p)
		*fence_p = fence;
	else if (fence)
		drm_fence_usage_deref_unlocked(&fence);
}

static int i915_execbuffer(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	struct drm_i915_execbuffer *exec_buf = data;
	struct drm_i915_batchbuffer *batch = &exec_buf->batch;
	struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
	int num_buffers;
	int ret;
	struct drm_i915_validate_buffer *buffers;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
		return -EINVAL;

	ret = drm_bo_read_lock(&dev->bm.bm_lock);
	if (ret)
		return ret;

	/*
	 * The cmdbuf_mutex makes sure the validate-submit-fence
	 * operation is atomic.
	 */

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		drm_bo_read_unlock(&dev->bm.bm_lock);
		return -EAGAIN;
	}

	num_buffers = exec_buf->num_buffers;

	buffers = drm_calloc(num_buffers, sizeof(struct drm_i915_validate_buffer), DRM_MEM_DRIVER);
	if (!buffers) {
		drm_bo_read_unlock(&dev->bm.bm_lock);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return -ENOMEM;
	}

	/* validate buffer list + fixup relocations */
	ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
					buffers, &num_buffers);
	if (ret)
		goto out_err0;

	/* make sure all previous memory operations have passed */
	DRM_MEMORYBARRIER();
	drm_agp_chipset_flush(dev);

	/* submit buffer */
	batch->start = buffers[num_buffers-1].buffer->offset;

	DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	ret = i915_dispatch_batchbuffer(dev, batch);
	if (ret)
		goto out_err0;

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	/* fence */
	i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);

out_err0:

	/* handle errors */
	ret = i915_handle_copyback(dev, buffers, num_buffers, ret);
	mutex_lock(&dev->struct_mutex);
	i915_dereference_buffers_locked(buffers, num_buffers);
	mutex_unlock(&dev->struct_mutex);

	/* size must match the drm_calloc() above */
	drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_i915_validate_buffer)), DRM_MEM_DRIVER);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	drm_bo_read_unlock(&dev->bm.bm_lock);
	return ret;
}
#endif

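/* Overall execbuffer flow: take the buffer-manager read lock, validate
 * and relocate every buffer in the user's ops_list, point batch->start
 * at the final GPU offset of the last buffer (the batch itself),
 * dispatch it, then fence (or idle on failure) and copy the per-buffer
 * results back to user-space.
 */
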
static int i915_do_cleanup_pageflip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;

	DRM_DEBUG("%s\n", __FUNCTION__);

	for (i = 0, planes = 0; i < 2; i++)
		if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
			dev_priv->sarea_priv->pf_current_page =
				(dev_priv->sarea_priv->pf_current_page &
				 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));

			planes |= 1 << i;
		}

	if (planes)
		i915_dispatch_flip(dev, planes, 0);

	return 0;
}

static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_i915_flip_t *param = data;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* This is really planes */
	if (param->pipes & ~0x3) {
		DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
			  param->pipes);
		return -EINVAL;
	}

	i915_dispatch_flip(dev, param->pipes, 0);

	return 0;
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		if (!IS_I965G(dev))
			dev_priv->use_mi_batchbuffer_start = param->value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}

drm_i915_mmio_entry_t mmio_table[] = {
	[MMIO_REGS_PS_DEPTH_COUNT] = {
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x2350,		/* PS_DEPTH_COUNT register offset */
		8		/* size in bytes */
	}
};

static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);

static int i915_mmio(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	uint32_t buf[8];
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mmio_entry_t *e;
	drm_i915_mmio_t *mmio = data;
	void __iomem *base;
	int i;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (mmio->reg >= mmio_table_size)
		return -EINVAL;

	e = &mmio_table[mmio->reg];
	base = (u8 *) dev_priv->mmio_map->handle + e->offset;

	switch (mmio->read_write) {
	case I915_MMIO_READ:
		if (!(e->flag & I915_MMIO_MAY_READ))
			return -EINVAL;
		for (i = 0; i < e->size / 4; i++)
			buf[i] = I915_READ(e->offset + i * 4);
		if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
			DRM_ERROR("DRM_COPY_TO_USER failed\n");
			return -EFAULT;
		}
		break;

	case I915_MMIO_WRITE:
		if (!(e->flag & I915_MMIO_MAY_WRITE))
			return -EINVAL;
		if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
			DRM_ERROR("DRM_COPY_FROM_USER failed\n");
			return -EFAULT;
		}
		for (i = 0; i < e->size / 4; i++)
			I915_WRITE(e->offset + i * 4, buf[i]);
		break;
	}
	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}
	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);

	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
		  dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}

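/* Unlike the drm_pci_alloc() path in i915_initialize(), G33-class chips
 * need the status page to live in graphics memory, so user-space
 * allocates it and passes its graphics address in through this ioctl;
 * the map set up above gives the CPU a view of the same page.
 */
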
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	/* dev_priv does not exist yet at load time; it is allocated below. */
	struct drm_i915_private *dev_priv;
	unsigned long base, size;
	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	dev->dev_private = (void *)dev_priv;

	/* Add register map (needed for suspend/resume) */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
			 _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
	intel_init_chipset_flush_compat(dev);
#endif
	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mmio_map)
		drm_rmmap(dev, dev_priv->mmio_map);

	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		 DRM_MEM_DRIVER);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
	intel_fini_chipset_flush_compat(dev);
#endif
	return 0;
}

void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (drm_getsarea(dev) && dev_priv->sarea_priv)
		i915_do_cleanup_pageflip(dev);
	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	if (dev_priv->sarea_kmap.virtual) {
		drm_bo_kunmap(&dev_priv->sarea_kmap);
		dev_priv->sarea_kmap.virtual = NULL;
		dev->lock.hw_lock = NULL;
		dev->sigdata.lock = NULL;
	}

	if (dev_priv->sarea_bo) {
		mutex_lock(&dev->struct_mutex);
		drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
		mutex_unlock(&dev->struct_mutex);
		dev_priv->sarea_bo = NULL;
	}

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
#ifdef I915_HAVE_BUFFER
	DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}

int i915_driver_firstopen(struct drm_device *dev)
{
#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}