2 * Copyright (c) 2007 Intel Corporation
3 * Jesse Barnes <jesse.barnes@intel.com>
5 * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
12 #include "drm_sarea.h"
15 #include "intel_bios.h"
16 #include "intel_drv.h"
19 * i915_probe_agp - get AGP bootup configuration
21 * @aperture_size: returns AGP aperture configured size
22 * @preallocated_size: returns size of BIOS preallocated AGP space
24 * Since Intel integrated graphics are UMA, the BIOS has to set aside
25 * some RAM for the framebuffer at early boot. This code figures out
26 * how much was set aside so we can use it for our own purposes.
/*
 * Determine the AGP aperture size and the amount of "stolen" RAM the BIOS
 * set aside for the integrated graphics (see the kernel-doc above).  Both
 * out-parameters are computed as multiples of 1 MB.
 *
 * NOTE(review): this view of the file is truncated — the declaration of
 * 'tmp', the error-return paths, break statements and closing braces are
 * not visible here; confirm control flow against the full source.
 */
28 int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
29 unsigned long *preallocated_size)
31 struct pci_dev *bridge_dev;
33 unsigned long overhead;
/* The GMCH graphics control register lives on the host bridge, 0000:00:00.0. */
35 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
37 DRM_ERROR("bridge device not found\n");
41 /* Get the fb aperture size and "stolen" memory amount. */
42 pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
/* Done with the bridge; drop the reference taken by pci_get_bus_and_slot(). */
43 pci_dev_put(bridge_dev);
/* Start both values at 1 MB; the switches below scale them up. */
45 *aperture_size = 1024 * 1024;
46 *preallocated_size = 1024 * 1024;
/* Pre-9xx parts encode the aperture size in the GMCH control word;
 * 9xx+ parts just report it via the BAR length (default branch below). */
48 switch (pdev->device) {
49 case PCI_DEVICE_ID_INTEL_82830_CGC:
50 case PCI_DEVICE_ID_INTEL_82845G_IG:
51 case PCI_DEVICE_ID_INTEL_82855GM_IG:
52 case PCI_DEVICE_ID_INTEL_82865_IG:
/* NOTE(review): the 64M branch's multiplier line is missing from this
 * view — presumably *aperture_size *= 64 when INTEL_GMCH_MEM_64M. */
53 if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
56 *aperture_size *= 128;
59 /* 9xx supports large sizes, just look at the length */
60 *aperture_size = pci_resource_len(pdev, 2);
65 * Some of the preallocated space is taken by the GTT
66 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
68 overhead = (*aperture_size / 1024) + 4096;
/* Decode the stolen-memory size field; each case scales the 1 MB base. */
69 switch (tmp & INTEL_855_GMCH_GMS_MASK) {
70 case INTEL_855_GMCH_GMS_STOLEN_1M:
71 break; /* 1M already */
72 case INTEL_855_GMCH_GMS_STOLEN_4M:
73 *preallocated_size *= 4;
75 case INTEL_855_GMCH_GMS_STOLEN_8M:
76 *preallocated_size *= 8;
78 case INTEL_855_GMCH_GMS_STOLEN_16M:
79 *preallocated_size *= 16;
81 case INTEL_855_GMCH_GMS_STOLEN_32M:
82 *preallocated_size *= 32;
84 case INTEL_915G_GMCH_GMS_STOLEN_48M:
85 *preallocated_size *= 48;
87 case INTEL_915G_GMCH_GMS_STOLEN_64M:
88 *preallocated_size *= 64;
90 case INTEL_855_GMCH_GMS_DISABLED:
91 DRM_ERROR("video memory is disabled\n");
94 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
95 tmp & INTEL_855_GMCH_GMS_MASK);
/* Report only the usable portion of the stolen range to the caller. */
98 *preallocated_size -= overhead;
/*
 * Allocate and map the hardware status page and point the GPU at it via
 * HWS_PGA.  Two paths are visible: a DMA-coherent page (drm_pci_alloc)
 * whose bus address is written to HWS_PGA, and a page carved out of the
 * stolen-memory (vram) memrange that is ioremapped through the aperture.
 *
 * NOTE(review): the chipset-selection branch between the two paths and
 * the error-return lines are missing from this truncated view — confirm
 * against the full source before relying on the flow below.
 */
103 static int i915_init_hwstatus(struct drm_device *dev)
105 struct drm_i915_private *dev_priv = dev->dev_private;
106 struct drm_memrange_node *free_space;
109 /* Program Hardware Status Page */
/* Path 1: a page-aligned DMA-coherent page, restricted to 32-bit addresses. */
111 dev_priv->status_page_dmah =
112 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff)
114 if (!dev_priv->status_page_dmah) {
115 DRM_ERROR("Can not allocate hardware status page\n");
119 dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
120 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
/* Hand the GPU the bus address of the status page. */
122 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
/* Path 2: reserve one page out of the stolen-memory allocator. */
124 free_space = drm_memrange_search_free(&dev_priv->vram,
128 DRM_ERROR("No free vram available, aborting\n");
133 dev_priv->hws = drm_memrange_get_block(free_space, PAGE_SIZE,
135 if (!dev_priv->hws) {
136 DRM_ERROR("Unable to allocate or pin hw status page\n");
141 dev_priv->hws_agpoffset = dev_priv->hws->start;
/* Map the page through the AGP aperture so the CPU can access it. */
142 dev_priv->hws_map.offset = dev->agp->base +
143 dev_priv->hws->start;
144 dev_priv->hws_map.size = PAGE_SIZE;
145 dev_priv->hws_map.type= 0;
146 dev_priv->hws_map.flags= 0;
147 dev_priv->hws_map.mtrr = 0;
149 drm_core_ioremap(&dev_priv->hws_map, dev);
150 if (dev_priv->hws_map.handle == NULL) {
151 dev_priv->hws_agpoffset = 0;
/* NOTE(review): concatenated literals yield "...forG33..." (missing
 * space) — left untouched since this is a documentation-only change. */
152 DRM_ERROR("can not ioremap virtual addr for"
153 "G33 hw status page\n");
157 dev_priv->hws_vaddr = dev_priv->hws_map.handle;
/* On this path HWS_PGA takes the GTT offset rather than a bus address. */
158 I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
/* Start with a zeroed status page regardless of which path was taken. */
161 memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
163 DRM_DEBUG("Enabled hardware status page\n");
/*
 * Undo i915_init_hwstatus(): release whichever status-page resources were
 * set up (DMA page, aperture mapping, vram block) and point HWS_PGA back
 * at a dummy address so the GPU no longer writes into freed memory.
 */
173 static void i915_cleanup_hwstatus(struct drm_device *dev)
175 struct drm_i915_private *dev_priv = dev->dev_private;
178 if (dev_priv->status_page_dmah)
179 drm_pci_free(dev, dev_priv->status_page_dmah);
181 if (dev_priv->hws_map.handle)
182 drm_core_ioremapfree(&dev_priv->hws_map, dev);
/* NOTE(review): no NULL guard visible for dev_priv->hws here — presumably
 * guarded by a missing line or safe by contract; confirm in full source. */
184 drm_memrange_put_block(dev_priv->hws);
/* Park HWS_PGA on a harmless high address (hardware default-style dummy). */
186 I915_WRITE(HWS_PGA, 0x1ffff000);
/*
 * Kernel-modesetting load path: size the aperture and stolen memory, set
 * up the vram allocator and GEM ranges, the ring buffer, the hardware
 * status page, the driver workqueue, VBIOS tables, output configuration
 * and the IRQ handler.
 *
 * NOTE(review): truncated view — the error checks following several calls
 * and the cleanup-label/return lines are only partially visible.
 */
189 static int i915_load_modeset_init(struct drm_device *dev)
191 struct drm_i915_private *dev_priv = dev->dev_private;
192 unsigned long agp_size, prealloc_size;
/* NOTE(review): return value of i915_probe_agp() is not checked here. */
195 i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
197 /* Basic memrange allocator for stolen space (aka vram) */
198 drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
199 /* Let GEM Manage from end of prealloc space to end of aperture */
200 i915_gem_do_init(dev, prealloc_size, agp_size);
202 ret = i915_gem_init_ringbuffer(dev);
206 ret = i915_init_hwstatus(dev);
208 goto destroy_ringbuffer;
210 /* Allow hardware batchbuffers unless told otherwise.
212 dev_priv->allow_batchbuffer = 1;
213 dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
214 mutex_init(&dev_priv->cmdbuf_mutex);
/* Single-threaded workqueue used for deferred work (e.g. GEM retire). */
216 dev_priv->wq = create_singlethread_workqueue("i915");
217 if (dev_priv->wq == 0) {
218 DRM_DEBUG("Error\n");
/* Parse VBIOS tables to learn panel/output information before modeset. */
223 ret = intel_init_bios(dev);
225 DRM_ERROR("failed to find VBIOS tables\n");
/* Discover outputs and build the initial mode configuration. */
230 intel_modeset_init(dev);
231 drm_helper_initial_config(dev, false);
233 dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
236 goto modeset_cleanup;
/* IRQs must come after modeset init so vblank/hotplug state is ready. */
239 ret = drm_irq_install(dev);
242 goto modeset_cleanup;
/* Error unwind: tear down in reverse order of setup. */
247 intel_modeset_cleanup(dev);
249 destroy_workqueue(dev_priv->wq);
251 i915_cleanup_hwstatus(dev);
253 i915_gem_cleanup_ringbuffer(dev);
259 * i915_driver_load - setup chip and create an initial config
261 * @flags: startup flags
263 * The driver load routine has to do several things:
264 * - drive output discovery via intel_modeset_init()
265 * - initialize the memory manager
266 * - allocate initial config memory
267 * - setup the DRM framebuffer with the allocated memory
/*
 * Main driver load entry point (see kernel-doc above): allocates the
 * per-device private, maps the MMIO registers, initializes GEM lists,
 * interrupt/vblank state, and — under DRIVER_MODESET — the full KMS path.
 *
 * NOTE(review): truncated view — several conditionals (chipset checks
 * around the BAR selection, error branches) and return lines are missing.
 */
269 int i915_driver_load(struct drm_device *dev, unsigned long flags)
271 struct drm_i915_private *dev_priv;
272 int ret = 0, num_pipes = 2;
275 dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
276 if (dev_priv == NULL)
279 memset(dev_priv, 0, sizeof(struct drm_i915_private));
280 dev->dev_private = (void *)dev_priv;
283 /* i915 has 4 more counters */
285 dev->types[6] = _DRM_STAT_IRQ;
286 dev->types[7] = _DRM_STAT_PRIMARY;
287 dev->types[8] = _DRM_STAT_SECONDARY;
288 dev->types[9] = _DRM_STAT_DMA;
/* Cursor placement: physical addresses on mobile/9xx, except 965/G33
 * which can use GTT-relative addresses (overrides the line above). */
290 if (IS_MOBILE(dev) || IS_I9XX(dev))
291 dev_priv->cursor_needs_physical = true;
293 dev_priv->cursor_needs_physical = false;
295 if (IS_I965G(dev) || IS_G33(dev))
296 dev_priv->cursor_needs_physical = false;
/* Config offset 0x5C: presumably the Base of Stolen Memory (BSM)
 * register — TODO confirm against chipset docs. */
299 pci_read_config_dword(dev->pdev, 0x5C, &dev_priv->stolen_base);
/* BAR layout differs by generation: one branch takes MMIO from BAR 0
 * and the framebuffer from BAR 2, the fallback branch the reverse.
 * NOTE(review): the chipset condition for the first branch is missing
 * from this view. */
302 dev_priv->mmiobase = drm_get_resource_start(dev, 0);
303 dev_priv->mmiolen = drm_get_resource_len(dev, 0);
304 dev->mode_config.fb_base =
305 drm_get_resource_start(dev, 2) & 0xff000000;
306 } else if (drm_get_resource_start(dev, 1)) {
307 dev_priv->mmiobase = drm_get_resource_start(dev, 1);
308 dev_priv->mmiolen = drm_get_resource_len(dev, 1);
309 dev->mode_config.fb_base =
310 drm_get_resource_start(dev, 0) & 0xff000000;
312 DRM_ERROR("Unable to find MMIO registers\n");
317 DRM_DEBUG("fb_base: 0x%08lx\n", dev->mode_config.fb_base);
/* Map the register aperture; needed by every I915_READ/WRITE below. */
319 ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
320 _DRM_REGISTERS, _DRM_KERNEL|_DRM_READ_ONLY|_DRM_DRIVER,
321 &dev_priv->mmio_map);
323 DRM_ERROR("Cannot add mapping for MMIO registers\n");
/* GEM bookkeeping: object lists and the deferred retire handler. */
327 INIT_LIST_HEAD(&dev_priv->mm.active_list);
328 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
329 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
330 INIT_LIST_HEAD(&dev_priv->mm.request_list);
331 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
332 i915_gem_retire_work_handler);
333 dev_priv->mm.next_gem_seqno = 1;
/* Kernel-version compatibility shims; mirrored in i915_driver_unload(). */
336 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
337 intel_init_chipset_flush_compat(dev);
339 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
340 intel_opregion_init(dev);
/* Ack any stale pipe status bits (these registers are write-1-to-clear). */
344 tmp = I915_READ(PIPEASTAT);
345 I915_WRITE(PIPEASTAT, tmp);
346 tmp = I915_READ(PIPEBSTAT);
347 I915_WRITE(PIPEBSTAT, tmp);
/* Mask everything off until the IRQ handler is installed. */
349 atomic_set(&dev_priv->irq_received, 0);
350 I915_WRITE(HWSTAM, 0xeffe);
351 I915_WRITE(IMR, 0x0);
352 I915_WRITE(IER, 0x0);
354 DRM_SPININIT(&dev_priv->swaps_lock, "swap");
355 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
356 dev_priv->swaps_pending = 0;
358 DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
359 dev_priv->user_irq_refcount = 0;
360 dev_priv->irq_mask_reg = ~0;
362 ret = drm_vblank_init(dev, num_pipes);
366 ret = drm_hotplug_init(dev);
370 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
371 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
373 i915_enable_interrupt(dev);
374 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
377 * Initialize the hardware status page IRQ location.
380 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
382 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
383 ret = i915_load_modeset_init(dev);
385 DRM_ERROR("failed to init modeset\n");
/* Error unwind: drop the MMIO mapping and the private structure. */
393 drm_rmmap(dev, dev_priv->mmio_map);
395 drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
/*
 * Driver unload: quiesce interrupts and the ring, then release everything
 * i915_driver_load() (and the modeset path) set up, in reverse order.
 *
 * NOTE(review): truncated view — the declaration of 'temp' and some
 * conditional/return lines are not visible here.
 */
399 int i915_driver_unload(struct drm_device *dev)
401 struct drm_i915_private *dev_priv = dev->dev_private;
405 dev_priv->vblank_pipe = 0;
407 dev_priv->irq_enabled = 0;
/* Mask every interrupt source at the hardware level. */
409 I915_WRITE(HWSTAM, 0xffffffff);
410 I915_WRITE(IMR, 0xffffffff);
411 I915_WRITE(IER, 0x0);
/* Ack pending pipe/IIR status (write-1-to-clear registers). */
413 temp = I915_READ(PIPEASTAT);
414 I915_WRITE(PIPEASTAT, temp);
415 temp = I915_READ(PIPEBSTAT);
416 I915_WRITE(PIPEBSTAT, temp);
417 temp = I915_READ(IIR);
418 I915_WRITE(IIR, temp);
/* Stop the primary ring buffer before tearing down its backing memory. */
420 I915_WRITE(PRB0_CTL, 0);
422 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
423 drm_irq_uninstall(dev);
424 intel_modeset_cleanup(dev);
425 destroy_workqueue(dev_priv->wq);
429 if (dev_priv->ring.virtual_start) {
430 drm_core_ioremapfree(&dev_priv->ring.map, dev);
/* Release the kernel mapping of the SAREA buffer object, if any. */
435 if (dev_priv->sarea_kmap.virtual) {
436 drm_bo_kunmap(&dev_priv->sarea_kmap);
437 dev_priv->sarea_kmap.virtual = NULL;
438 dev->sigdata.lock = NULL;
441 if (dev_priv->sarea_bo) {
442 mutex_lock(&dev->struct_mutex);
443 drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
444 mutex_unlock(&dev->struct_mutex);
445 dev_priv->sarea_bo = NULL;
448 i915_cleanup_hwstatus(dev);
450 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
451 mutex_lock(&dev->struct_mutex);
452 i915_gem_cleanup_ringbuffer(dev);
453 mutex_unlock(&dev->struct_mutex);
454 drm_memrange_takedown(&dev_priv->vram);
455 i915_gem_lastclose(dev);
458 drm_rmmap(dev, dev_priv->mmio_map);
/* Mirror of the version-dependent init shims in i915_driver_load(). */
461 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
462 intel_opregion_free(dev);
464 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
465 intel_fini_chipset_flush_compat(dev);
469 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
471 dev->dev_private = NULL;
/*
 * Per-master setup: allocate the master-private structure and prebuild
 * the shared SAREA mapping that user space expects.
 *
 * NOTE(review): the allocation-failure check and return lines are missing
 * from this truncated view.
 */
475 int i915_master_create(struct drm_device *dev, struct drm_master *master)
477 struct drm_i915_master_private *master_priv;
478 unsigned long sareapage;
481 master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
485 /* prebuild the SAREA */
/* SAREA must be at least one page even if SAREA_MAX is smaller. */
486 sareapage = max(SAREA_MAX, PAGE_SIZE);
487 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
488 &master_priv->sarea);
490 DRM_ERROR("SAREA setup failed\n");
/* Driver-private area sits immediately after the generic drm_sarea header. */
493 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
494 master_priv->sarea_priv->pf_current_page = 0;
496 master->driver_priv = master_priv;
/*
 * Per-master teardown: drop the SAREA mapping and free the master-private
 * structure created in i915_master_create().
 */
500 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
502 struct drm_i915_master_private *master_priv = master->driver_priv;
507 if (master_priv->sarea)
508 drm_rmmap(dev, master_priv->sarea);
510 drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
/* Clear the pointer so later callbacks see the master as uninitialized. */
512 master->driver_priv = NULL;
/*
 * Per-open setup: allocate the per-file private used for GEM seqno
 * throttling bookkeeping.
 *
 * NOTE(review): the allocation-failure check and return lines are missing
 * from this truncated view.
 */
515 int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
517 struct drm_i915_file_private *i915_file_priv;
520 i915_file_priv = (struct drm_i915_file_private *)
521 drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
526 file_priv->driver_priv = i915_file_priv;
/* Seqno 0 means "nothing submitted yet" for this client. */
528 i915_file_priv->mm.last_gem_seqno = 0;
529 i915_file_priv->mm.last_gem_throttle_seqno = 0;
/*
 * Per-close teardown: free the per-file private allocated in
 * i915_driver_open().
 */
534 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
536 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
538 drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
/*
 * Called before a file handle closes: release this client's legacy AGP
 * heap allocations.  Skipped under KMS, where the legacy heap is unused.
 */
541 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
543 struct drm_i915_private *dev_priv = dev->dev_private;
544 if (!drm_core_check_feature(dev, DRIVER_MODESET))
545 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
/*
 * Called when the last file handle closes: tear down legacy (non-KMS)
 * state — validation buffers, GEM objects, the AGP heap, the SAREA
 * mapping/BO — and finally the DMA engine.
 *
 * NOTE(review): truncated view — the body of the DRIVER_MODESET branch
 * (presumably an early return) and several closing braces are missing.
 */
548 void i915_driver_lastclose(struct drm_device * dev)
550 struct drm_i915_private *dev_priv = dev->dev_private;
/* KMS keeps its state across opens; the legacy cleanup below is skipped. */
552 if (drm_core_check_feature(dev, DRIVER_MODESET))
555 #ifdef I915_HAVE_BUFFER
556 if (dev_priv->val_bufs) {
557 vfree(dev_priv->val_bufs);
558 dev_priv->val_bufs = NULL;
562 i915_gem_lastclose(dev);
564 if (dev_priv->agp_heap)
565 i915_mem_takedown(&(dev_priv->agp_heap));
/* Drop the SAREA kernel mapping and invalidate the hw lock pointers. */
568 if (dev_priv->sarea_kmap.virtual) {
569 drm_bo_kunmap(&dev_priv->sarea_kmap);
570 dev_priv->sarea_kmap.virtual = NULL;
571 dev->control->master->lock.hw_lock = NULL;
572 dev->sigdata.lock = NULL;
575 if (dev_priv->sarea_bo) {
576 mutex_lock(&dev->struct_mutex);
577 drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
578 mutex_unlock(&dev->struct_mutex);
579 dev_priv->sarea_bo = NULL;
583 i915_dma_cleanup(dev);
586 int i915_driver_firstopen(struct drm_device *dev)
588 if (drm_core_check_feature(dev, DRIVER_MODESET))
590 #if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
591 drm_bo_driver_init(dev);