From: Dave Airlie
Date: Thu, 28 Feb 2008 06:24:17 +0000 (+1000)
Subject: drm: add modesetting as a driver feature.
X-Git-Tag: libdrm-2.4.3~16^2~234^2~24
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=01dcc47d895997f77c9457558e974d41c23ed4e1;p=platform%2Fupstream%2Flibdrm.git

drm: add modesetting as a driver feature.

This change adds a driver feature that, for i915, is controlled by a module
parameter. You now need to do "insmod i915.ko modeset=1" to enable the
modesetting paths.

It also fixes up lots of X paths. I can run my new DDX driver on this code
with and without modesetting enabled.
---
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 51b02dc..1a0a565 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -106,6 +106,7 @@ struct drm_file;
 #define DRIVER_IRQ_SHARED 0x80
 #define DRIVER_DMA_QUEUE 0x100
 #define DRIVER_FB_DMA 0x200
+#define DRIVER_MODESET 0x400

 /*@}*/
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 4932ea5..dd76421 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -414,7 +414,8 @@ static void drm_cleanup(struct drm_device * dev)
         drm_ht_remove(&dev->object_hash);

         drm_put_minor(&dev->primary);
-        drm_put_minor(&dev->control);
+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                drm_put_minor(&dev->control);
         if (drm_put_dev(dev))
                 DRM_ERROR("Cannot unload module\n");
 }
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 5a74f42..fcadc54 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -481,7 +481,8 @@ int drm_release(struct inode *inode, struct file *filp)
         }
         mutex_unlock(&dev->ctxlist_mutex);

-        drm_fb_release(filp);
+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                drm_fb_release(filp);

         file_priv->master = NULL;
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index 637f5e1..7dddbe1 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -261,6 +261,9 @@ int drm_irq_uninstall(struct drm_device * dev)
         if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                 return -EINVAL;

+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                return 0;
+
         mutex_lock(&dev->struct_mutex);
         irq_enabled = dev->irq_enabled;
         dev->irq_enabled = 0;
@@ -306,6 +309,8 @@ int drm_control(struct drm_device *dev, void *data,
         case DRM_INST_HANDLER:
                 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                         return 0;
+                if (drm_core_check_feature(dev, DRIVER_MODESET))
+                        return 0;
                 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
                     ctl->irq != dev->irq)
                         return -EINVAL;
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 334c8f0..6856075 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -343,8 +343,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                 goto err_g3;
         }

-        if ((ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL)))
-                goto err_g3;
+        /* only add the control node on a modesetting platform */
+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                if ((ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL)))
+                        goto err_g3;

         if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
                 goto err_g4;
@@ -361,7 +363,8 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 err_g5:
         drm_put_minor(&dev->primary);
 err_g4:
-        drm_put_minor(&dev->control);
+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                drm_put_minor(&dev->control);
 err_g3:
         if (!drm_fb_loaded)
                 pci_disable_device(pdev);
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 6c12f1a..b844dfe 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -39,6 +39,9 @@
 static struct pci_device_id pciidlist[] = {
         i915_PCI_IDS
 };

+unsigned int i915_modeset = 0;
+module_param_named(modeset, i915_modeset, int, 0400);
+
 #ifdef I915_HAVE_FENCE
 extern struct drm_fence_driver i915_fence_driver;
 #endif
@@ -563,8 +566,8 @@ static struct drm_driver driver = {
             DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
         .load = i915_driver_load,
         .unload = i915_driver_unload,
-/*      .lastclose = i915_driver_lastclose,
-        .preclose = i915_driver_preclose, */
+        .lastclose = i915_driver_lastclose,
+        .preclose = i915_driver_preclose,
         .suspend = i915_suspend,
         .resume = i915_resume,
         .device_is_agp = i915_driver_device_is_agp,
@@ -624,6 +627,9 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 static int __init i915_init(void)
 {
         driver.num_ioctls = i915_max_ioctl;
+        if (i915_modeset == 1)
+                driver.driver_features |= DRIVER_MODESET;
+
         return drm_init(&driver, pciidlist);
 }
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index eee24dd..20095a9 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -66,6 +66,11 @@ void i915_kernel_lost_context(struct drm_device * dev)
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_ring_buffer *ring = &(dev_priv->ring);

+        /* we should never lose context on the ring with modesetting
+         * as we don't expose it to userspace */
+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                return;
+
         ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
         ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
         ring->space = ring->head - (ring->tail + 8);
@@ -75,6 +80,11 @@ void i915_kernel_lost_context(struct drm_device * dev)

 int i915_dma_cleanup(struct drm_device * dev)
 {
+        struct drm_i915_private *dev_priv = dev->dev_private;
+
+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                return 0;
+
         /* Make sure interrupts are disabled here because the uninstall ioctl
          * may not have been called from userspace and after dev_private
          * is freed, it's too late.
@@ -82,6 +92,28 @@ int i915_dma_cleanup(struct drm_device * dev)
         if (dev->irq)
                 drm_irq_uninstall(dev);

+        if (dev_priv->ring.virtual_start) {
+                drm_core_ioremapfree(&dev_priv->ring.map, dev);
+                dev_priv->ring.virtual_start = 0;
+                dev_priv->ring.map.handle = 0;
+                dev_priv->ring.map.size = 0;
+                dev_priv->ring.Size = 0;
+        }
+
+        if (dev_priv->status_page_dmah) {
+                drm_pci_free(dev, dev_priv->status_page_dmah);
+                dev_priv->status_page_dmah = NULL;
+                /* Need to rewrite hardware status page */
+                I915_WRITE(0x02080, 0x1ffff000);
+        }
+
+        if (dev_priv->status_gfx_addr) {
+                dev_priv->status_gfx_addr = 0;
+                drm_core_ioremapfree(&dev_priv->hws_map, dev);
+                I915_WRITE(0x02080, 0x1ffff000);
+        }
+
         return 0;
 }
@@ -153,13 +185,17 @@ static int i915_initialize(struct drm_device * dev,
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

-        dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
-        if (!dev_priv->mmio_map) {
-                i915_dma_cleanup(dev);
-                DRM_ERROR("can not find mmio map!\n");
-                return -EINVAL;
+        if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+                if (init->mmio_offset != 0)
+                        dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+                if (!dev_priv->mmio_map) {
+                        i915_dma_cleanup(dev);
+                        DRM_ERROR("can not find mmio map!\n");
+                        return -EINVAL;
+                }
         }
+
 #ifdef I915_HAVE_BUFFER
         dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
 #endif
@@ -246,6 +282,9 @@ static int i915_dma_resume(struct drm_device * dev)

         DRM_DEBUG("\n");

+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                return 0;
+
         if (!dev_priv->mmio_map) {
                 DRM_ERROR("can not find mmio map!\n");
                 return -EINVAL;
diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c
index b2d3f0d..a2e08bc 100644
--- a/shared-core/i915_init.c
+++ b/shared-core/i915_init.c
@@ -153,106 +153,113 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         DRM_DEBUG("fb_base: 0x%08lx\n", dev->mode_config.fb_base);

         ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
-                         _DRM_REGISTERS, _DRM_READ_ONLY|_DRM_DRIVER, &dev_priv->mmio_map);
+                         _DRM_REGISTERS, _DRM_KERNEL|_DRM_READ_ONLY|_DRM_DRIVER, &dev_priv->mmio_map);
         if (ret != 0) {
                 DRM_ERROR("Cannot add mapping for MMIO registers\n");
                 return ret;
         }

+#ifdef __linux__
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+        intel_init_chipset_flush_compat(dev);
+#endif
+#endif
+
         /*
          * Initialize the memory manager for local and AGP space
          */
         drm_bo_driver_init(dev);

-        i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
-        printk("setting up %ld bytes of VRAM space\n", prealloc_size);
-        printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
-        drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
-        drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT, (agp_size - prealloc_size) >> PAGE_SHIFT, 1);
-
-        I915_WRITE(LP_RING + RING_LEN, 0);
-        I915_WRITE(LP_RING + RING_HEAD, 0);
-        I915_WRITE(LP_RING + RING_TAIL, 0);
-
-        size = PRIMARY_RINGBUFFER_SIZE;
-        ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
-                                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
-                                       DRM_BO_FLAG_MEM_VRAM |
-                                       DRM_BO_FLAG_NO_EVICT,
-                                       DRM_BO_HINT_DONT_FENCE, 0x1, 0,
-                                       &dev_priv->ring_buffer);
-        if (ret < 0) {
-                DRM_ERROR("Unable to allocate or pin ring buffer\n");
-                return -EINVAL;
-        }
-
-        /* remap the buffer object properly */
-        dev_priv->ring.Start = dev_priv->ring_buffer->offset;
-        dev_priv->ring.End = dev_priv->ring.Start + size;
-        dev_priv->ring.Size = size;
-        dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
-        /* FIXME: need wrapper with PCI mem checks */
-        ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
-                                  (void **) &dev_priv->ring.virtual_start);
-        if (ret)
-                DRM_ERROR("error mapping ring buffer: %d\n", ret);
-
-        DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
-                  dev_priv->ring.virtual_start, dev_priv->ring.Size);
+        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+                i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
+                printk("setting up %ld bytes of VRAM space\n", prealloc_size);
+                printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
+                drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
+                drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT, (agp_size - prealloc_size) >> PAGE_SHIFT, 1);
+
+                I915_WRITE(LP_RING + RING_LEN, 0);
+                I915_WRITE(LP_RING + RING_HEAD, 0);
+                I915_WRITE(LP_RING + RING_TAIL, 0);
+
+                size = PRIMARY_RINGBUFFER_SIZE;
+                ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
+                                               DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+                                               DRM_BO_FLAG_MEM_VRAM |
+                                               DRM_BO_FLAG_NO_EVICT,
+                                               DRM_BO_HINT_DONT_FENCE, 0x1, 0,
+                                               &dev_priv->ring_buffer);
+                if (ret < 0) {
+                        DRM_ERROR("Unable to allocate or pin ring buffer\n");
+                        return -EINVAL;
+                }
+
+                /* remap the buffer object properly */
+                dev_priv->ring.Start = dev_priv->ring_buffer->offset;
+                dev_priv->ring.End = dev_priv->ring.Start + size;
+                dev_priv->ring.Size = size;
+                dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+                /* FIXME: need wrapper with PCI mem checks */
+                ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
+                                          (void **) &dev_priv->ring.virtual_start);
+                if (ret)
+                        DRM_ERROR("error mapping ring buffer: %d\n", ret);
+
+                DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
+                          dev_priv->ring.virtual_start, dev_priv->ring.Size);
 //
-        memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
-
-        I915_WRITE(LP_RING + RING_START, dev_priv->ring.Start);
-        I915_WRITE(LP_RING + RING_LEN,
-                   ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
-                   (RING_NO_REPORT | RING_VALID));
-
-        /* We are using separate values as placeholders for mechanisms for
-         * private backbuffer/depthbuffer usage.
-         */
-        dev_priv->use_mi_batchbuffer_start = 0;
+                memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
+
+                I915_WRITE(LP_RING + RING_START, dev_priv->ring.Start);
+                I915_WRITE(LP_RING + RING_LEN,
+                           ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
+                           (RING_NO_REPORT | RING_VALID));
+
+                /* We are using separate values as placeholders for mechanisms for
+                 * private backbuffer/depthbuffer usage.
+                 */
+                dev_priv->use_mi_batchbuffer_start = 0;
+
+                /* Allow hardware batchbuffers unless told otherwise.
+                 */
+                dev_priv->allow_batchbuffer = 1;
+
+                /* Program Hardware Status Page */
+                if (!IS_G33(dev)) {
+                        dev_priv->status_page_dmah =
+                                drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+                        if (!dev_priv->status_page_dmah) {
+                                dev->dev_private = (void *)dev_priv;
+                                i915_dma_cleanup(dev);
+                                DRM_ERROR("Can not allocate hardware status page\n");
+                                return -ENOMEM;
+                        }
+                        dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+                        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+                        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+                        I915_WRITE(I915REG_HWS_PGA, dev_priv->dma_status_page);
+                }
+                DRM_DEBUG("Enabled hardware status page\n");
-        /* Allow hardware batchbuffers unless told otherwise.
-         */
-        dev_priv->allow_batchbuffer = 1;
-
-        /* Program Hardware Status Page */
-        if (!IS_G33(dev)) {
-                dev_priv->status_page_dmah =
-                        drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
-                if (!dev_priv->status_page_dmah) {
-                        dev->dev_private = (void *)dev_priv;
-                        i915_dma_cleanup(dev);
-                        DRM_ERROR("Can not allocate hardware status page\n");
-                        return -ENOMEM;
+                dev_priv->wq = create_singlethread_workqueue("i915");
+                if (dev_priv == 0) {
+                        DRM_DEBUG("Error\n");
                 }
-                dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
-                dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-                memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+                intel_modeset_init(dev);
+                drm_initial_config(dev, false);
-                I915_WRITE(I915REG_HWS_PGA, dev_priv->dma_status_page);
-        }
-        DRM_DEBUG("Enabled hardware status page\n");
+                drm_mm_print(&dev->bm.man[DRM_BO_MEM_VRAM].manager, "VRAM");
+                drm_mm_print(&dev->bm.man[DRM_BO_MEM_TT].manager, "TT");
-        dev_priv->wq = create_singlethread_workqueue("i915");
-        if (dev_priv == 0) {
-                DRM_DEBUG("Error\n");
+                drm_irq_install(dev);
         }
-
-        intel_modeset_init(dev);
-        drm_initial_config(dev, false);
-
-        drm_mm_print(&dev->bm.man[DRM_BO_MEM_VRAM].manager, "VRAM");
-        drm_mm_print(&dev->bm.man[DRM_BO_MEM_TT].manager, "TT");
-
-        drm_irq_install(dev);
-
         return 0;
 }
@@ -262,7 +269,10 @@ int i915_driver_unload(struct drm_device *dev)

         I915_WRITE(LP_RING + RING_LEN, 0);

-        intel_modeset_cleanup(dev);
+
+        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+                intel_modeset_cleanup(dev);
+        }
 #if 0
         if (dev_priv->ring.virtual_start) {
@@ -298,25 +308,33 @@ int i915_driver_unload(struct drm_device *dev)
                 I915_WRITE(I915REG_HWS_PGA, 0x1ffff000);
         }

-        drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
-                            dev_priv->ring.virtual_start);
+        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+                drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
+                                    dev_priv->ring.virtual_start);

-        DRM_DEBUG("usage is %d\n", atomic_read(&dev_priv->ring_buffer->usage));
-        mutex_lock(&dev->struct_mutex);
-        drm_bo_usage_deref_locked(&dev_priv->ring_buffer);
+                DRM_DEBUG("usage is %d\n", atomic_read(&dev_priv->ring_buffer->usage));
+                mutex_lock(&dev->struct_mutex);
+                drm_bo_usage_deref_locked(&dev_priv->ring_buffer);

-        if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
-                DRM_ERROR("Memory manager type 3 not clean. "
-                          "Delaying takedown\n");
-        }
-        if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
-                DRM_ERROR("Memory manager type 3 not clean. "
+                if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
+                        DRM_ERROR("Memory manager type 3 not clean. "
+                                  "Delaying takedown\n");
+                }
+                if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
+                        DRM_ERROR("Memory manager type 3 not clean. "
+                                  "Delaying takedown\n");
+                }
+                mutex_unlock(&dev->struct_mutex);
         }
-        mutex_unlock(&dev->struct_mutex);

         drm_bo_driver_finish(dev);

+#ifdef __linux__
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+        intel_init_chipset_flush_compat(dev);
+#endif
+#endif
+
         DRM_DEBUG("%p\n", dev_priv->mmio_map);
         drm_rmmap(dev, dev_priv->mmio_map);
@@ -363,3 +381,23 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
         master->driver_priv = NULL;
 }
+
+void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+}
+
+void i915_driver_lastclose(struct drm_device * dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+
+        if (drm_core_check_feature(dev, DRIVER_MODESET))
+                return;
+
+        if (dev_priv->agp_heap)
+                i915_mem_takedown(&(dev_priv->agp_heap));
+
+        i915_dma_cleanup(dev);
+}