2 * Copyright (c) 2007 Intel Corporation
3 * Jesse Barnes <jesse.barnes@intel.com>
5 * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
12 #include "drm_sarea.h"
15 #include "intel_bios.h"
16 #include "intel_drv.h"
19 * i915_probe_agp - get AGP bootup configuration
21 * @aperture_size: returns AGP aperture configured size
22 * @preallocated_size: returns size of BIOS preallocated AGP space
24 * Since Intel integrated graphics are UMA, the BIOS has to set aside
25 * some RAM for the framebuffer at early boot. This code figures out
26 * how much was set aside so we can use it for our own purposes.
28 int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
29 unsigned long *preallocated_size)
/* NOTE(review): this listing has interleaved source lines elided (opening
 * brace, `tmp` declaration, error returns, per-case break statements).
 * Comments below describe only what is visible; confirm against the full
 * file before editing. */
31 struct pci_dev *bridge_dev;
33 unsigned long overhead;
/* The GMCH control register lives on the host bridge (bus 0, dev 0, fn 0),
 * not on the graphics device itself. */
35 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
37 DRM_ERROR("bridge device not found\n");
/* Read GMCH config, then drop the bridge reference taken above. */
41 /* Get the fb aperture size and "stolen" memory amount. */
42 pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
43 pci_dev_put(bridge_dev);
/* Both outputs start at 1MB and are scaled up by the multipliers below. */
45 *aperture_size = 1024 * 1024;
46 *preallocated_size = 1024 * 1024;
/* Pre-9xx chips encode the aperture size in the GMCH register (64MB or
 * 128MB); 9xx and later report it directly via the BAR length. */
48 switch (pdev->device) {
49 case PCI_DEVICE_ID_INTEL_82830_CGC:
50 case PCI_DEVICE_ID_INTEL_82845G_IG:
51 case PCI_DEVICE_ID_INTEL_82855GM_IG:
52 case PCI_DEVICE_ID_INTEL_82865_IG:
53 if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
/* presumably the elided branch sets *aperture_size *= 64 — TODO confirm */
56 *aperture_size *= 128;
/* default case (9xx): BAR 2 is the GMADR (graphics aperture). */
59 /* 9xx supports large sizes, just look at the length */
60 *aperture_size = pci_resource_len(pdev, 2);
65 * Some of the preallocated space is taken by the GTT
66 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
68 overhead = (*aperture_size / 1024) + 4096;
/* Decode the BIOS-stolen memory size from the GMCH graphics mode select
 * field. NOTE(review): the `break;` after each multiplier is on an elided
 * line — verify these cases do not actually fall through. */
69 switch (tmp & INTEL_855_GMCH_GMS_MASK) {
70 case INTEL_855_GMCH_GMS_STOLEN_1M:
71 break; /* 1M already */
72 case INTEL_855_GMCH_GMS_STOLEN_4M:
73 *preallocated_size *= 4;
75 case INTEL_855_GMCH_GMS_STOLEN_8M:
76 *preallocated_size *= 8;
78 case INTEL_855_GMCH_GMS_STOLEN_16M:
79 *preallocated_size *= 16;
81 case INTEL_855_GMCH_GMS_STOLEN_32M:
82 *preallocated_size *= 32;
84 case INTEL_915G_GMCH_GMS_STOLEN_48M:
85 *preallocated_size *= 48;
87 case INTEL_915G_GMCH_GMS_STOLEN_64M:
88 *preallocated_size *= 64;
90 case INTEL_855_GMCH_GMS_DISABLED:
91 DRM_ERROR("video memory is disabled\n");
/* default: unknown GMS encoding — report it (error return elided). */
94 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
95 tmp & INTEL_855_GMCH_GMS_MASK);
/* Subtract GTT + popup overhead so callers see only usable stolen memory. */
98 *preallocated_size -= overhead;
/*
 * Modeset-path load: carve stolen memory into VRAM/TT pools, set up the
 * ring buffer and hardware status page, start the workqueue, parse the
 * VBIOS tables, and bring up the initial display configuration.
 * NOTE(review): declarations of `ret`/`size`, error checks after several
 * calls, and the error-unwind labels are on elided lines in this listing.
 */
103 int i915_load_modeset_init(struct drm_device *dev)
105 struct drm_i915_private *dev_priv = dev->dev_private;
106 unsigned long agp_size, prealloc_size;
/* NOTE(review): i915_probe_agp() return value is ignored here; on failure
 * agp_size/prealloc_size may be garbage — consider checking it. */
109 i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
110 printk("setting up %ld bytes of VRAM space\n", prealloc_size);
111 printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
/* Stolen (BIOS-preallocated) memory becomes the VRAM pool; the rest of the
 * aperture becomes the TT (GART-translated) pool, starting right after it. */
113 drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
114 drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT,
115 (agp_size - prealloc_size) >> PAGE_SHIFT, 1);
/* Stop the primary ring before (re)programming it. */
116 I915_WRITE(PRB0_CTL, 0);
117 I915_WRITE(PRB0_HEAD, 0);
118 I915_WRITE(PRB0_TAIL, 0);
/* Allocate the ring buffer as a pinned, non-evictable VRAM buffer object. */
120 size = PRIMARY_RINGBUFFER_SIZE;
121 ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
122 DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
123 DRM_BO_FLAG_MEM_VRAM |
124 DRM_BO_FLAG_NO_EVICT,
125 DRM_BO_HINT_DONT_FENCE, 0x1, 0,
126 &dev_priv->ring_buffer);
128 DRM_ERROR("Unable to allocate or pin ring buffer\n");
/* Record the ring's GTT placement; tail_mask requires Size to be a power
 * of two (presumably PRIMARY_RINGBUFFER_SIZE is — TODO confirm). */
132 /* remap the buffer object properly */
133 dev_priv->ring.Start = dev_priv->ring_buffer->offset;
134 dev_priv->ring.End = dev_priv->ring.Start + size;
135 dev_priv->ring.Size = size;
136 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
138 /* FIXME: need wrapper with PCI mem checks */
139 ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
140 (void **) &dev_priv->ring.virtual_start);
142 DRM_ERROR("error mapping ring buffer: %d\n", ret);
143 goto destroy_ringbuffer;
146 DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
147 dev_priv->ring.virtual_start, dev_priv->ring.Size);
/* Clear the ring, then program start/control and mark the ring valid. */
149 memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
150 I915_WRITE(PRB0_START, dev_priv->ring.Start);
151 I915_WRITE(PRB0_CTL, ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
152 (RING_NO_REPORT | RING_VALID));
154 /* We are using separate values as placeholders for mechanisms for
155 * private backbuffer/depthbuffer usage.
/* 965+ must use MI_BATCH_BUFFER_START; older parts use the legacy method. */
157 dev_priv->use_mi_batchbuffer_start = 0;
158 if (IS_I965G(dev)) /* 965 doesn't support older method */
159 dev_priv->use_mi_batchbuffer_start = 1;
161 /* Allow hardware batchbuffers unless told otherwise.
163 dev_priv->allow_batchbuffer = 1;
164 dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
165 mutex_init(&dev_priv->cmdbuf_mutex);
/* Hardware status page: first a physically-contiguous DMA page is set up;
 * the later buffer-object path (hws_bo/hws_map) covers chips that need the
 * status page in the GTT instead (presumably G33-class — see the error
 * string below; TODO confirm which path wins on which hardware). */
167 /* Program Hardware Status Page */
169 dev_priv->status_page_dmah =
170 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
172 if (!dev_priv->status_page_dmah) {
173 DRM_ERROR("Can not allocate hardware status page\n");
175 goto destroy_ringbuffer;
177 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
178 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
180 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
182 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
/* GTT-based status page variant (elided lines presumably assign the result
 * to dev_priv->hws_bo and check `ret` — TODO confirm). */
185 ret = drm_buffer_object_create(dev, size,
187 DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
188 DRM_BO_FLAG_MEM_VRAM |
189 DRM_BO_FLAG_NO_EVICT,
190 DRM_BO_HINT_DONT_FENCE, 0x1, 0,
193 DRM_ERROR("Unable to allocate or pin hw status page\n");
195 goto destroy_ringbuffer;
/* Status page GFX address must be 4K-aligned within a 1GB range, hence the
 * (0x1ffff << 12) mask. */
198 dev_priv->status_gfx_addr =
199 dev_priv->hws_bo->offset & (0x1ffff << 12);
200 dev_priv->hws_map.offset = dev->agp->base +
201 dev_priv->hws_bo->offset;
202 dev_priv->hws_map.size = size;
203 dev_priv->hws_map.type= 0;
204 dev_priv->hws_map.flags= 0;
205 dev_priv->hws_map.mtrr = 0;
207 drm_core_ioremap(&dev_priv->hws_map, dev);
208 if (dev_priv->hws_map.handle == NULL) {
209 dev_priv->status_gfx_addr = 0;
/* NOTE(review): missing space between "for" and "G33" when these two string
 * pieces are concatenated. */
210 DRM_ERROR("can not ioremap virtual addr for"
211 "G33 hw status page\n");
215 dev_priv->hw_status_page = dev_priv->hws_map.handle;
216 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
217 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
219 DRM_DEBUG("Enabled hardware status page\n");
/* NOTE(review): comparing a pointer against 0 — `if (!dev_priv->wq)` is the
 * conventional form; also the error message is uninformative. */
221 dev_priv->wq = create_singlethread_workqueue("i915");
222 if (dev_priv->wq == 0) {
223 DRM_DEBUG("Error\n");
/* Parse the video BIOS tables (panel data etc.) before modeset init. */
228 ret = intel_init_bios(dev);
230 DRM_ERROR("failed to find VBIOS tables\n");
/* Output discovery and initial framebuffer configuration. */
235 intel_modeset_init(dev);
236 drm_helper_initial_config(dev, false);
238 drm_mm_print(&dev->bm.man[DRM_BO_MEM_VRAM].manager, "VRAM");
239 drm_mm_print(&dev->bm.man[DRM_BO_MEM_TT].manager, "TT");
/* NOTE(review): kstrdup failure presumably jumps to modeset_cleanup — the
 * check itself is on an elided line. */
241 dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
244 goto modeset_cleanup;
247 ret = drm_irq_install(dev);
250 goto modeset_cleanup;
/* --- error unwind (labels elided): tear down in reverse order of setup --- */
255 intel_modeset_cleanup(dev);
257 destroy_workqueue(dev_priv->wq);
260 if (dev_priv->status_page_dmah)
261 drm_pci_free(dev, dev_priv->status_page_dmah);
263 if (dev_priv->hws_map.handle)
264 drm_core_ioremapfree(&dev_priv->hws_map, dev);
265 if (dev_priv->hws_bo)
266 drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
/* Point HWS_PGA back at a benign address after freeing the status page. */
268 I915_WRITE(HWS_PGA, 0x1ffff000);
270 if (dev_priv->ring.virtual_start)
271 drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
272 dev_priv->ring.virtual_start)
273 if (dev_priv->ring_buffer)
274 drm_bo_usage_deref_unlocked(&dev_priv->ring_buffer);
276 drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1);
277 drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1);
282 * i915_driver_load - setup chip and create an initial config
284 * @flags: startup flags
286 * The driver load routine has to do several things:
287 * - drive output discovery via intel_modeset_init()
288 * - initialize the memory manager
289 * - allocate initial config memory
290 * - setup the DRM framebuffer with the allocated memory
292 int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* NOTE(review): `ret` declaration, several returns/branches, and the
 * IS_I9XX/IS_MOBILE `else` lines are elided from this listing — comments
 * describe only the visible statements. */
294 struct drm_i915_private *dev_priv;
/* Allocate and zero the per-device private structure. */
297 dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
298 if (dev_priv == NULL)
301 memset(dev_priv, 0, sizeof(struct drm_i915_private));
302 dev->dev_private = (void *)dev_priv;
303 // dev_priv->flags = flags;
/* Register four extra statistics counters on top of the DRM core set. */
305 /* i915 has 4 more counters */
307 dev->types[6] = _DRM_STAT_IRQ;
308 dev->types[7] = _DRM_STAT_PRIMARY;
309 dev->types[8] = _DRM_STAT_SECONDARY;
310 dev->types[9] = _DRM_STAT_DMA;
/* Hardware cursor placement: mobile/9xx parts need a physical address,
 * except 965/G33 which can take a GTT offset again. */
312 if (IS_MOBILE(dev) || IS_I9XX(dev))
313 dev_priv->cursor_needs_physical = true;
315 dev_priv->cursor_needs_physical = false;
317 if (IS_I965G(dev) || IS_G33(dev))
318 dev_priv->cursor_needs_physical = false;
/* Config offset 0x5C holds the base of BIOS-stolen memory (BSM register —
 * presumably gated on a chipset check in an elided line; TODO confirm). */
321 pci_read_config_dword(dev->pdev, 0x5C, &dev_priv->stolen_base);
322 DRM_DEBUG("stolen base %p\n", (void*)dev_priv->stolen_base);
/* Locate MMIO registers and the framebuffer aperture. BAR layout differs
 * by generation: first branch uses BAR 0 for MMIO / BAR 2 for fb, second
 * uses BAR 1 for MMIO / BAR 0 for fb (the opening `if` condition for the
 * first branch is elided). */
326 dev_priv->mmiobase = drm_get_resource_start(dev, 0);
327 dev_priv->mmiolen = drm_get_resource_len(dev, 0);
328 dev->mode_config.fb_base =
329 drm_get_resource_start(dev, 2) & 0xff000000;
330 } else if (drm_get_resource_start(dev, 1)) {
331 dev_priv->mmiobase = drm_get_resource_start(dev, 1);
332 dev_priv->mmiolen = drm_get_resource_len(dev, 1);
333 dev->mode_config.fb_base =
334 drm_get_resource_start(dev, 0) & 0xff000000;
336 DRM_ERROR("Unable to find MMIO registers\n");
341 DRM_DEBUG("fb_base: 0x%08lx\n", dev->mode_config.fb_base);
/* Map the MMIO register range kernel-side, read-only from userspace. */
343 ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
344 _DRM_REGISTERS, _DRM_KERNEL|_DRM_READ_ONLY|_DRM_DRIVER,
345 &dev_priv->mmio_map);
347 DRM_ERROR("Cannot add mapping for MMIO registers\n");
/* Pre-2.6.25 kernels lack the chipset-flush API; use the compat shim. */
352 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
353 intel_init_chipset_flush_compat(dev);
/* Kernel modesetting path: bring up the buffer manager, then the full
 * modeset init (ring, status page, outputs). */
357 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
359 * Initialize the memory manager for local and AGP space
361 ret = drm_bo_driver_init(dev);
363 DRM_ERROR("fail to init memory manager for "
364 "local & AGP space\n");
368 ret = i915_load_modeset_init(dev);
370 DRM_ERROR("failed to init modeset\n");
/* --- error unwind (labels elided): reverse order of setup --- */
377 drm_bo_driver_finish(dev);
379 drm_rmmap(dev, dev_priv->mmio_map);
381 drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
385 int i915_driver_unload(struct drm_device *dev)
/* Driver teardown: mirror of i915_driver_load/i915_load_modeset_init.
 * NOTE(review): several closing braces, returns, and `else` lines are
 * elided from this listing. */
387 struct drm_i915_private *dev_priv = dev->dev_private;
/* Stop the primary ring before tearing anything down. */
389 I915_WRITE(PRB0_CTL, 0);
391 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
392 drm_irq_uninstall(dev);
393 intel_modeset_cleanup(dev);
394 destroy_workqueue(dev_priv->wq);
/* Non-modeset (legacy) ring mapping cleanup — presumably the branch this
 * sits in excludes the modeset case, since the modeset path unmaps via
 * drm_mem_reg_iounmap below; TODO confirm against elided lines. */
398 if (dev_priv->ring.virtual_start) {
399 drm_core_ioremapfree(&dev_priv->ring.map, dev);
/* Release the kernel mapping of the SAREA and the buffer object backing it. */
402 if (dev_priv->sarea_kmap.virtual) {
403 drm_bo_kunmap(&dev_priv->sarea_kmap);
404 dev_priv->sarea_kmap.virtual = NULL;
405 dev->sigdata.lock = NULL;
408 if (dev_priv->sarea_bo) {
409 mutex_lock(&dev->struct_mutex);
410 drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
411 mutex_unlock(&dev->struct_mutex);
412 dev_priv->sarea_bo = NULL;
/* Free the physically-contiguous hardware status page, then point HWS_PGA
 * at a benign address since the backing memory is gone. */
415 if (dev_priv->status_page_dmah) {
416 drm_pci_free(dev, dev_priv->status_page_dmah);
417 dev_priv->status_page_dmah = NULL;
418 dev_priv->hw_status_page = NULL;
419 dev_priv->dma_status_page = 0;
420 /* Need to rewrite hardware status page */
421 I915_WRITE(HWS_PGA, 0x1ffff000);
/* GTT-based status page variant: unmap and drop the buffer object. */
424 if (dev_priv->status_gfx_addr) {
425 dev_priv->status_gfx_addr = 0;
426 drm_core_ioremapfree(&dev_priv->hws_map, dev);
427 drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
428 I915_WRITE(HWS_PGA, 0x1ffff000);
/* Modeset path: unmap and release the ring buffer object, then drain both
 * memory-manager pools. */
431 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
432 drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
433 dev_priv->ring.virtual_start);
435 DRM_DEBUG("usage is %d\n", atomic_read(&dev_priv->ring_buffer->usage));
436 mutex_lock(&dev->struct_mutex);
437 drm_bo_usage_deref_locked(&dev_priv->ring_buffer);
439 if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
440 DRM_ERROR("Memory manager type 3 not clean. "
441 "Delaying takedown\n");
/* NOTE(review): this error message says "type 3" too — copy-paste from the
 * TT case above; the VRAM pool is a different memory type. */
443 if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
444 DRM_ERROR("Memory manager type 3 not clean. "
445 "Delaying takedown\n");
447 mutex_unlock(&dev->struct_mutex);
450 drm_bo_driver_finish(dev);
453 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
/* NOTE(review): unload calls intel_init_chipset_flush_compat() — the same
 * *init* function the load path calls. This almost certainly should be the
 * fini/teardown counterpart (intel_fini_chipset_flush_compat); confirm and
 * fix, otherwise the compat state is re-initialized (and likely leaked) on
 * module unload. */
454 intel_init_chipset_flush_compat(dev);
458 DRM_DEBUG("%p\n", dev_priv->mmio_map);
459 drm_rmmap(dev, dev_priv->mmio_map);
461 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
463 dev->dev_private = NULL;
/*
 * Per-master setup: allocate the master-private struct and prebuild the
 * shared SAREA mapping used by legacy userspace.
 * NOTE(review): allocation-failure and addmap-failure returns are on elided
 * lines in this listing.
 */
467 int i915_master_create(struct drm_device *dev, struct drm_master *master)
469 struct drm_i915_master_private *master_priv;
470 unsigned long sareapage;
473 master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER)
/* SAREA must be at least one page regardless of SAREA_MAX. */
477 /* prebuild the SAREA */
478 sareapage = max(SAREA_MAX, PAGE_SIZE);
479 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
480 &master_priv->sarea);
482 DRM_ERROR("SAREA setup failed\n");
/* Driver-private area sits immediately after the generic drm_sarea header. */
485 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
486 master_priv->sarea_priv->pf_current_page = 0;
488 master->driver_priv = master_priv;
/*
 * Per-master teardown: undo i915_master_create — drop the SAREA mapping and
 * free the master-private struct. NOTE(review): the NULL-check/early-return
 * guard on master_priv is presumably on an elided line — confirm.
 */
492 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
494 struct drm_i915_master_private *master_priv = master->driver_priv;
499 if (master_priv->sarea)
500 drm_rmmap(dev, master_priv->sarea);
502 drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
504 master->driver_priv = NULL;
/*
 * Per-file close hook: release this client's allocations from the legacy
 * AGP heap. NOTE(review): as shown, the release runs only when MODESET is
 * enabled; historically this guard is inverted (legacy path only) — an
 * elided line may change the sense, so confirm against the full file.
 */
507 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
509 struct drm_i915_private *dev_priv = dev->dev_private;
510 if (drm_core_check_feature(dev, DRIVER_MODESET))
511 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
/*
 * Last-close hook: tear down the legacy AGP heap and DMA state. The body
 * after the MODESET check is elided (presumably an early return, so the
 * legacy cleanup below only runs for non-modeset drivers — confirm).
 */
514 void i915_driver_lastclose(struct drm_device * dev)
516 struct drm_i915_private *dev_priv = dev->dev_private;
518 if (drm_core_check_feature(dev, DRIVER_MODESET))
521 if (dev_priv->agp_heap)
522 i915_mem_takedown(&(dev_priv->agp_heap));
524 i915_dma_cleanup(dev);
527 int i915_driver_firstopen(struct drm_device *dev)
529 if (drm_core_check_feature(dev, DRIVER_MODESET))
532 drm_bo_driver_init(dev);