/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Reset the timeout whenever the head advances, so we only
		 * fail if the ring has genuinely stalled. */
		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
		DRM_UDELAY(1);
	}

	return -EBUSY;
}
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;
}
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
	}

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
		/* Need to rewrite hardware status page */
		I915_WRITE(0x02080, 0x1ffff000);
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
		I915_WRITE(0x02080, 0x1ffff000);
	}

	return 0;
}
#if defined(I915_HAVE_BUFFER)
#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
#define DRI2_SAREA_BLOCK_NEXT(p)				\
	((void *) ((unsigned char *) (p) +			\
		   DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p)))

#define DRI2_SAREA_BLOCK_END		0x0000
#define DRI2_SAREA_BLOCK_LOCK		0x0001
#define DRI2_SAREA_BLOCK_EVENT_BUFFER	0x0002
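
/* As implied by the macros above, the DRI2 sarea referenced by
 * init->sarea_handle is treated as a simple stream of blocks: each block
 * starts with a dword whose high 16 bits give the block type and whose low
 * 16 bits give the block size in bytes, and a block of type
 * DRI2_SAREA_BLOCK_END terminates the stream.  setup_dri2_sarea() below
 * walks this stream.
 */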
static int
setup_dri2_sarea(struct drm_device * dev,
		 struct drm_file *file_priv,
		 drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	unsigned int *p, *end, *next;

	mutex_lock(&dev->struct_mutex);
	dev_priv->sarea_bo =
		drm_lookup_buffer_object(file_priv,
					 init->sarea_handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!dev_priv->sarea_bo) {
		DRM_ERROR("did not find sarea bo\n");
		return -EINVAL;
	}

	ret = drm_bo_kmap(dev_priv->sarea_bo, 0,
			  dev_priv->sarea_bo->num_pages,
			  &dev_priv->sarea_kmap);
	if (ret) {
		DRM_ERROR("could not map sarea bo\n");
		return ret;
	}

	p = dev_priv->sarea_kmap.virtual;
	end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT);
	while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) {
		switch (DRI2_SAREA_BLOCK_TYPE(*p)) {
		case DRI2_SAREA_BLOCK_LOCK:
			dev->lock.hw_lock = (void *) (p + 1);
			dev->sigdata.lock = dev->lock.hw_lock;
			break;
		}
		next = DRI2_SAREA_BLOCK_NEXT(p);
		if (next <= p || end < next) {
			DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n",
				  next, p, end);
			return -EINVAL;
		}
		p = next;
	}

	return 0;
}
static int i915_initialize(struct drm_device * dev,
			   struct drm_file *file_priv,
			   drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
#if defined(I915_HAVE_BUFFER)
	int ret;
#endif
	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		i915_dma_cleanup(dev);
		return -EINVAL;
	}

	if (init->mmio_offset != 0)
		dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

#ifdef I915_HAVE_BUFFER
	dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
#endif

	if (init->sarea_priv_offset)
		dev_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *) dev_priv->sarea->handle +
			 init->sarea_priv_offset);
	else {
		/* No sarea_priv for you! */
		dev_priv->sarea_priv = NULL;
	}

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}
	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->pf_current_page = 0;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;
	if (IS_I965G(dev)) /* 965 doesn't support older method */
		dev_priv->use_mi_batchbuffer_start = 1;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Enable vblank on pipe A for older X servers
	 */
	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;

	/* Program Hardware Status Page */
	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

		if (!dev_priv->status_page_dmah) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Can not allocate hardware status page\n");
			return -ENOMEM;
		}
		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

		I915_WRITE(0x02080, dev_priv->dma_status_page);
	}
	DRM_DEBUG("Enabled hardware status page\n");
#ifdef I915_HAVE_BUFFER
	mutex_init(&dev_priv->cmdbuf_mutex);
#endif
#if defined(I915_HAVE_BUFFER)
	if (init->func == I915_INIT_DMA2) {
		ret = setup_dri2_sarea(dev, file_priv, init);
		if (ret) {
			i915_dma_cleanup(dev);
			DRM_ERROR("could not set up dri2 sarea\n");
			return ret;
		}
	}
#endif

	return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	else
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
	case I915_INIT_DMA2:
		retcode = i915_initialize(dev, file_priv, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/* 	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}
static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
			  int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
static int i915_emit_box(struct drm_device * dev,
			 struct drm_clip_rect __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */
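
/* The breadcrumb is a simple sequence number: i915_emit_breadcrumb() stores
 * dev_priv->counter into the hardware status page via MI_STORE_DWORD_INDEX,
 * and READ_BREADCRUMB() later reads that slot back to report how far the
 * ring has progressed (see the last_dispatch updates in the ioctls below).
 */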
void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	if (++dev_priv->counter > BREADCRUMB_MASK) {
		dev_priv->counter = 1;
		DRM_DEBUG("Breadcrumb counter wrapped around\n");
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t flush_cmd = MI_FLUSH;
	RING_LOCALS;

	flush_cmd |= flush;

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(flush_cmd);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
#ifdef I915_HAVE_FENCE
	drm_i915_private_t *dev_priv = dev->dev_private;
#endif
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	if (unlikely((dev_priv->counter & 0xFF) == 0))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}
int i915_dispatch_batchbuffer(struct drm_device * dev,
			      drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment\n");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	if (unlikely((dev_priv->counter & 0xFF) == 0))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
	return 0;
}
static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 num_pages, current_page, next_page, dspbase;
	int shift = 2 * plane, x, y;
	RING_LOCALS;

	/* Calculate display base offset */
	num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
	current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
	next_page = (current_page + 1) % num_pages;

	switch (next_page) {
	default:
	case 0:
		dspbase = dev_priv->sarea_priv->front_offset;
		break;
	case 1:
		dspbase = dev_priv->sarea_priv->back_offset;
		break;
	case 2:
		dspbase = dev_priv->sarea_priv->third_offset;
		break;
	}

	if (plane == 0) {
		x = dev_priv->sarea_priv->planeA_x;
		y = dev_priv->sarea_priv->planeA_y;
	} else {
		x = dev_priv->sarea_priv->planeB_x;
		y = dev_priv->sarea_priv->planeB_y;
	}

	dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;

	DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
		  dspbase);

	BEGIN_LP_RING(4);
	OUT_RING(sync ? 0 :
		 (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
				       MI_WAIT_FOR_PLANE_A_FLIP)));
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
		 (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
	OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
	OUT_RING(dspbase);
	ADVANCE_LP_RING();

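	/* pf_current_page packs one 2-bit current-page index per plane
	 * (bits 1:0 for plane A, bits 3:2 for plane B), hence the shift
	 * of 2 * plane above. */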
	dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
	dev_priv->sarea_priv->pf_current_page |= next_page << shift;
}
void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
		  planes, dev_priv->sarea_priv->pf_current_page);

	i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);

	for (i = 0; i < 2; i++)
		if (planes & (1 << i))
			i915_do_dispatch_flip(dev, i, sync);

	i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
	if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
		drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
int i915_quiescent(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_quiescent(dev);
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	ret = i915_dispatch_batchbuffer(dev, batch);

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	int ret;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
				cmdbuf->num_cliprects *
				sizeof(struct drm_clip_rect))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return -EFAULT;
	}

	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return 0;
}
#if defined(DRM_DEBUG_CODE)
#define DRM_DEBUG_RELOCATION	(drm_debug != 0)
#else
#define DRM_DEBUG_RELOCATION	0
#endif

static int i915_do_cleanup_pageflip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;

	DRM_DEBUG("%s\n", __FUNCTION__);

	for (i = 0, planes = 0; i < 2; i++)
		if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
			dev_priv->sarea_priv->pf_current_page =
				(dev_priv->sarea_priv->pf_current_page &
				 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));

			planes |= 1 << i;
		}

	if (planes)
		i915_dispatch_flip(dev, planes, 0);

	return 0;
}
static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_i915_flip_t *param = data;

	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* This is really planes */
	if (param->pipes & ~0x3) {
		DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
			  param->pipes);
		return -EINVAL;
	}

	i915_dispatch_flip(dev, param->pipes, 0);

	return 0;
}
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		if (!IS_I965G(dev))
			dev_priv->use_mi_batchbuffer_start = param->value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}
drm_i915_mmio_entry_t mmio_table[] = {
	[MMIO_REGS_PS_DEPTH_COUNT] = {
		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
		0x2350,
		8
	}
};

static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
static int i915_mmio(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	uint32_t buf[8];
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mmio_entry_t *e;
	drm_i915_mmio_t *mmio = data;
	u8 *base;
	int i;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (mmio->reg >= mmio_table_size)
		return -EINVAL;

	e = &mmio_table[mmio->reg];
	base = (u8 *) dev_priv->mmio_map->handle + e->offset;

	switch (mmio->read_write) {
	case I915_MMIO_READ:
		if (!(e->flag & I915_MMIO_MAY_READ))
			return -EINVAL;
		for (i = 0; i < e->size / 4; i++)
			buf[i] = I915_READ(e->offset + i * 4);
		if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
			DRM_ERROR("DRM_COPY_TO_USER failed\n");
			return -EFAULT;
		}
		break;

	case I915_MMIO_WRITE:
		if (!(e->flag & I915_MMIO_MAY_WRITE))
			return -EINVAL;
		if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
			DRM_ERROR("DRM_COPY_FROM_USER failed\n");
			return -EFAULT;
		}
		for (i = 0; i < e->size / 4; i++)
			I915_WRITE(e->offset + i * 4, buf[i]);
		break;
	}
	return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);

	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
		  dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	unsigned long base, size;
	int ret = 0, num_pipes = 2, mmio_bar = IS_I9XX(dev) ? 0 : 1;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	dev->dev_private = (void *)dev_priv;

	/* Add register map (needed for suspend/resume) */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
			 _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);

#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
	intel_init_chipset_flush_compat(dev);
#endif
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
	intel_opregion_init(dev);
#endif
#endif

	I915_WRITE16(HWSTAM, 0xeffe);
	I915_WRITE16(IMR, 0x0);
	I915_WRITE16(IER, 0x0);

	DRM_SPININIT(&dev_priv->swaps_lock, "swap");
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
	dev_priv->user_irq_refcount = 0;
	dev_priv->irq_enable_reg = 0;

	ret = drm_vblank_init(dev, num_pipes);
	if (ret)
		return ret;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

	i915_enable_interrupt(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	/*
	 * Initialize the hardware status page IRQ location.
	 */

	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 temp;

	if (dev_priv) {
		dev_priv->vblank_pipe = 0;

		dev_priv->irq_enabled = 0;
		I915_WRITE(HWSTAM, 0xffffffff);
		I915_WRITE(IMR, 0xffffffff);
		I915_WRITE(IER, 0x0);

		temp = I915_READ(PIPEASTAT);
		I915_WRITE(PIPEASTAT, temp);
		temp = I915_READ(PIPEBSTAT);
		I915_WRITE(PIPEBSTAT, temp);
		temp = I915_READ(IIR);
		I915_WRITE(IIR, temp);
	}

	if (dev_priv->mmio_map)
		drm_rmmap(dev, dev_priv->mmio_map);

#ifdef __linux__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
	intel_opregion_free(dev);
#endif
#endif

	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		 DRM_MEM_DRIVER);
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
	intel_fini_chipset_flush_compat(dev);
#endif
#endif
	return 0;
}
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* agp off can use this to get called before dev_priv */
	if (!dev_priv)
		return;

#ifdef I915_HAVE_BUFFER
	if (dev_priv->val_bufs) {
		vfree(dev_priv->val_bufs);
		dev_priv->val_bufs = NULL;
	}
#endif

	if (drm_getsarea(dev) && dev_priv->sarea_priv)
		i915_do_cleanup_pageflip(dev);
	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));
#if defined(I915_HAVE_BUFFER)
	if (dev_priv->sarea_kmap.virtual) {
		drm_bo_kunmap(&dev_priv->sarea_kmap);
		dev_priv->sarea_kmap.virtual = NULL;
		dev->lock.hw_lock = NULL;
		dev->sigdata.lock = NULL;
	}

	if (dev_priv->sarea_bo) {
		mutex_lock(&dev->struct_mutex);
		drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
		mutex_unlock(&dev->struct_mutex);
		dev_priv->sarea_bo = NULL;
	}
#endif
	i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
#ifdef I915_HAVE_BUFFER
	DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}

int i915_driver_firstopen(struct drm_device *dev)
{
#ifdef I915_HAVE_BUFFER
	drm_bo_driver_init(dev);
#endif
	return 0;
}