/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

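/*
 * Editorial note (not part of the original source): these macros are always
 * used as a begin/emit/advance triple and assume a local "dev_priv" is in
 * scope. A minimal sketch, mirroring the uses later in this file:
 *
 *	if (BEGIN_LP_RING(2) == 0) {
 *		OUT_RING(MI_FLUSH);
 *		OUT_RING(MI_NOOP);
 *		ADVANCE_LP_RING();
 *	}
 *
 * BEGIN_LP_RING() reserves (and may wait for) ring space, OUT_RING() writes
 * one dword, and ADVANCE_LP_RING() publishes the new tail to the hardware.
 */
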
/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21

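/*
 * Editorial note (not part of the original source): the DRI1 "breadcrumb" is
 * a sequence number that the GPU writes into dword 0x21 of the hardware
 * status page via MI_STORE_DWORD_INDEX (see i915_emit_breadcrumb() below).
 * The CPU polls it through READ_BREADCRUMB() to learn how far the ring has
 * actually executed, e.g. (seqno being a previously emitted counter value):
 *
 *	if (READ_BREADCRUMB(dev_priv) >= seqno)
 *		;	// work up to seqno has completed
 */
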
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	/*
	 * The dri breadcrumb update races against the drm master disappearing.
	 * Instead of trying to fix this (this is by far not the only ums issue)
	 * just don't do the update in kms mode.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}

static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

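/*
 * Editorial note (not part of the original source): the free-space arithmetic
 * above is ordinary ring-buffer math. Worked example: with a 64 KiB ring,
 * head == 0x100 and tail == 0x800, head - (tail + I915_RING_FREE_SPACE) is
 * negative, so ring->size is added to wrap it back into [0, size); the
 * result is how many bytes may be emitted before the consumer must wait for
 * head to advance.
 */
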
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			return (cmd & 0xffff) + 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

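/*
 * Editorial example (not from the original source): for a 2D command the top
 * three bits are 0x2 and the low byte is a length field, so a dword such as
 * 0x54000004 validates to (0x04 + 2) = 6 dwords, telling the scanner in
 * i915_emit_cmds() below to resume checking 6 dwords further into the buffer.
 */
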
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

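/*
 * Editorial note (not part of the original source): on hardware other than
 * 830/845 the batch is chained with MI_BATCH_BUFFER_START, which takes only
 * a start address and returns automatically at MI_BATCH_BUFFER_END; 830/845
 * use the older MI_BATCH_BUFFER, which must encode both start and end
 * addresses inline, which is why that path emits four dwords instead of two.
 */
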
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring = LP_RING(dev_priv);
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->dri1.gfx_hws_cpu_addr =
		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}

	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

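/*
 * Editorial note (not part of the original source): MCHBAR is the
 * memory-mapped window onto the memory controller hub registers. As the
 * helpers below show, its base address lives in the bridge's PCI config
 * space -- a 32-bit BAR at 0x44 before gen4, a 64-bit BAR at 0x48 from gen4
 * on -- with bit 0 of the BAR (or DEVEN bit 28 on 915G/915GM) acting as the
 * enable bit.
 */
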
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; remember whether we need to disable it again
 * on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->mm.suspended = 0;
		return 0;
	}

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config(dev);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

static void
i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
		unsigned long size)
{
	dev_priv->mm.gtt_mtrr = -1;

#if defined(CONFIG_X86_PAT)
	if (cpu_has_pat)
		return;
#endif

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}
}

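/*
 * Editorial note (not part of the original source): on PAT-capable kernels
 * the write-combining mapping comes from PAT page attributes, making an MTRR
 * redundant; the early return above therefore means mtrr_add() is only
 * attempted on machines where PAT is unavailable or disabled.
 */
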
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);
}

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = dev_priv->info;

#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
#define DEV_INFO_SEP ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
			 info->gen,
			 dev_priv->dev->pdev->device,
			 DEV_INFO_FLAGS);
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
}

/**
 * intel_early_sanitize_regs - clean up BIOS state
 * @dev: DRM device
 *
 * This function must be called before we do any I915_READ or I915_WRITE. Its
 * purpose is to clean up any state left by the BIOS that may affect us when
 * reading and/or writing registers.
 */
static void intel_early_sanitize_regs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev))
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	spin_lock_init(&dev_priv->rps.lock);
	spin_lock_init(&dev_priv->gt_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->rps.hw_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	i915_dump_device_info(dev_priv);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	intel_early_sanitize_regs(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto put_bridge;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kick_out_firmware_fb(dev_priv);

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
			aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_irq_init(dev);
	intel_gt_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	dev_priv->num_plane = 1;
	if (IS_VALLEYVIEW(dev))
		dev_priv->num_plane = 2;

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	/* Start out suspended */
	dev_priv->mm.suspended = 1;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_gem_unload;
		}
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	return 0;

out_gem_unload:
	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr,
			 dev_priv->gtt.mappable_base,
			 aperture_size);
		dev_priv->mm.gtt_mtrr = -1;
	}
	io_mapping_free(dev_priv->gtt.mappable);
	dev_priv->gtt.gtt_remove(dev);
out_rmmap:
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	io_mapping_free(dev_priv->gtt.mappable);
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr,
			 dev_priv->gtt.mappable_base,
			 dev_priv->gtt.mappable_end);
		dev_priv->mm.gtt_mtrr = -1;
	}

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		cancel_work_sync(&dev_priv->console_resume_work);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->child_dev && dev_priv->child_dev_num) {
			kfree(dev_priv->child_dev);
			dev_priv->child_dev = NULL;
			dev_priv->child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	idr_init(&file_priv->context_idr);

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any AGP state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};

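/*
 * Editorial note (not part of the original source): in the flag column above,
 * DRM_AUTH requires an authenticated client, DRM_MASTER/DRM_ROOT_ONLY
 * restrict the ioctl to the current DRM master or to root, and DRM_UNLOCKED
 * lets the handler run without taking the legacy big DRM lock.
 */
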
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp, for
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}