From: Jesse Barnes
Date: Wed, 11 Jun 2008 17:25:45 +0000 (-0700)
Subject: Merge commit 'origin/drm-gem' into modesetting-gem
X-Git-Tag: libdrm-2.4.3~16^2~134
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f5412a944fa4666e25f4fa27b6ed85c21ccb65a0;p=platform%2Fupstream%2Flibdrm.git

Merge commit 'origin/drm-gem' into modesetting-gem

Use new GEM based ring buffer initialization.  Still need to init GEM &
use it for framebuffer allocation etc.

Conflicts:

	shared-core/i915_dma.c
	shared-core/i915_drv.h
---

f5412a944fa4666e25f4fa27b6ed85c21ccb65a0
diff --cc linux-core/i915_gem.c
index b287089,2564f41..961831c
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@@ -1383,6 -1389,7 +1389,7 @@@ in
  i915_gem_execbuffer(struct drm_device *dev, void *data,
  		    struct drm_file *file_priv)
  {
 -	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_private *dev_priv = dev->dev_private;
  	struct drm_i915_gem_execbuffer *args = data;
  	struct drm_i915_gem_exec_object *exec_list = NULL;
  	struct drm_gem_object **object_list = NULL;
@@@ -1757,3 -1793,173 +1793,173 @@@ i915_gem_lastclose(struct drm_device *d
  	mutex_unlock(&dev->struct_mutex);
  }
+ 
 -static int
++int
+ i915_gem_init_ringbuffer(struct drm_device *dev)
+ {
 -	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_gem_object *obj;
+ 	struct drm_i915_gem_object *obj_priv;
+ 	int ret;
+ 
+ 	obj = drm_gem_object_alloc(dev, 128 * 1024);
+ 	if (obj == NULL) {
+ 		DRM_ERROR("Failed to allocate ringbuffer\n");
+ 		return -ENOMEM;
+ 	}
+ 	obj_priv = obj->driver_private;
+ 
+ 	ret = i915_gem_object_pin(obj, 4096);
+ 	if (ret != 0) {
+ 		/* Don't leak the object if pinning fails. */
+ 		drm_gem_object_unreference(obj);
+ 		return ret;
+ 	}
+ 
+ 	/* Set up the kernel mapping for the ring. */
+ 	dev_priv->ring.Size = obj->size;
+ 	dev_priv->ring.tail_mask = obj->size - 1;
+ 
+ 	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
+ 	dev_priv->ring.map.size = obj->size;
+ 	dev_priv->ring.map.type = 0;
+ 	dev_priv->ring.map.flags = 0;
+ 	dev_priv->ring.map.mtrr = 0;
+ 
+ 	drm_core_ioremap(&dev_priv->ring.map, dev);
+ 	if (dev_priv->ring.map.handle == NULL) {
+ 		DRM_ERROR("Failed to map ringbuffer.\n");
+ 		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+ 		i915_gem_object_unpin(obj);
+ 		drm_gem_object_unreference(obj);
+ 		return -EINVAL;
+ 	}
+ 	dev_priv->ring.ring_obj = obj;
+ 	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+ 
+ 	/* Stop the ring if it's running. */
+ 	I915_WRITE(LP_RING + RING_LEN, 0);
+ 	I915_WRITE(LP_RING + RING_HEAD, 0);
+ 	I915_WRITE(LP_RING + RING_TAIL, 0);
+ 	I915_WRITE(LP_RING + RING_START, 0);
+ 
+ 	/* Initialize the ring.
+ 	 */
+ 	I915_WRITE(LP_RING + RING_START, obj_priv->gtt_offset);
+ 	I915_WRITE(LP_RING + RING_LEN,
+ 		   ((obj->size - 4096) & RING_NR_PAGES) |
+ 		   RING_NO_REPORT |
+ 		   RING_VALID);
+ 
+ 	/* Update our cache of the ring state */
+ 	i915_kernel_lost_context(dev);
+ 
+ 	return 0;
+ }
+ 
+ static void
+ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+ {
 -	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+ 	if (dev_priv->ring.ring_obj == NULL)
+ 		return;
+ 
+ 	drm_core_ioremapfree(&dev_priv->ring.map, dev);
+ 
+ 	i915_gem_object_unpin(dev_priv->ring.ring_obj);
+ 	drm_gem_object_unreference(dev_priv->ring.ring_obj);
+ 
+ 	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+ }
+ 
+ int
+ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ 		       struct drm_file *file_priv)
+ {
 -	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	int ret;
+ 
+ 	ret = i915_gem_init_ringbuffer(dev);
+ 	if (ret != 0)
+ 		return ret;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 	dev_priv->mm.suspended = 0;
+ 	mutex_unlock(&dev->struct_mutex);
+ 	return 0;
+ }
+ 
+ /** Unbinds all objects that are on the given buffer list. */
+ static int
+ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+ {
+ 	struct drm_gem_object *obj;
+ 	struct drm_i915_gem_object *obj_priv;
+ 	int ret;
+ 
+ 	while (!list_empty(head)) {
+ 		obj_priv = list_first_entry(head,
+ 					    struct drm_i915_gem_object,
+ 					    list);
+ 		obj = obj_priv->obj;
+ 
+ 		if (obj_priv->pin_count != 0) {
+ 			DRM_ERROR("Pinned object in unbind list\n");
+ 			mutex_unlock(&dev->struct_mutex);
+ 			return -EINVAL;
+ 		}
+ 
+ 		ret = i915_gem_object_unbind(obj);
+ 		if (ret != 0) {
+ 			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
+ 				  ret);
+ 			mutex_unlock(&dev->struct_mutex);
+ 			return ret;
+ 		}
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ int
+ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ 		       struct drm_file *file_priv)
+ {
 -	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 	/* Hack! Don't let anybody do execbuf while we don't control the chip.
+ 	 * We need to replace this with a semaphore, or something.
+ 	 */
+ 	dev_priv->mm.suspended = 1;
+ 
+ 	/* Move all buffers out of the GTT. */
+ 	i915_gem_evict_from_list(dev, &dev_priv->mm.active_list);
+ 	i915_gem_evict_from_list(dev, &dev_priv->mm.flushing_list);
+ 	i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+ 
+ 	/* Make sure the hardware's idle.
+ 	 */
+ 	while (!list_empty(&dev_priv->mm.request_list)) {
+ 		struct drm_i915_gem_request *request;
+ 		int ret;
+ 
+ 		request = list_first_entry(&dev_priv->mm.request_list,
+ 					   struct drm_i915_gem_request,
+ 					   list);
+ 
+ 		ret = i915_wait_request(dev, request->seqno);
+ 		if (ret != 0) {
+ 			DRM_ERROR("Error waiting for idle at LeaveVT: %d\n",
+ 				  ret);
+ 			mutex_unlock(&dev->struct_mutex);
+ 			return ret;
+ 		}
+ 	}
+ 
+ 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ 
+ 	i915_gem_cleanup_ringbuffer(dev);
+ 
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+ 	return 0;
+ }
diff --cc shared-core/i915_dma.c
index 86881ab,4243b4e..1e51e70
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@@ -227,25 -243,30 +227,21 @@@ static int i915_initialize(struct drm_d
  	}
  
  #ifdef I915_HAVE_BUFFER
- 	dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
 -#endif
 -
- 	if (init->sarea_priv_offset)
- 		dev_priv->sarea_priv = (drm_i915_sarea_t *)
- 			((u8 *) dev_priv->sarea->handle +
- 			 init->sarea_priv_offset);
- 	else {
- 		/* No sarea_priv for you! */
- 		dev_priv->sarea_priv = NULL;
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ 		dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
  	}
 +#endif
  
- 	if (!dev_priv->ring.Size) {
- 		dev_priv->ring.Start = init->ring_start;
- 		dev_priv->ring.End = init->ring_end;
+ 	if (init->ring_size != 0) {
  		dev_priv->ring.Size = init->ring_size;
  		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
- 
- 		dev_priv->ring.map.offset = init->ring_start;
  		dev_priv->ring.map.size = init->ring_size;
  		dev_priv->ring.map.type = 0;
  		dev_priv->ring.map.flags = 0;
  		dev_priv->ring.map.mtrr = 0;
- 
- 		drm_core_ioremap(&dev_priv->ring.map, dev);
- 
+ 		if (dev_priv->ring.map.handle == NULL) {
  			i915_dma_cleanup(dev);
  			DRM_ERROR("can not ioremap virtual address for"
@@@ -255,9 -276,11 +251,8 @@@
  		dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
  	}
  
- 	dev_priv->cpp = init->cpp;
- 
- 	if (dev_priv->sarea_priv)
- 		dev_priv->sarea_priv->pf_current_page = 0;
+ 	master_priv->sarea_priv->pf_current_page = 0;
  
  	/* We are using separate values as placeholders for mechanisms for
  	 * private backbuffer/depthbuffer usage.
diff --cc shared-core/i915_drv.h
index eab51e3,3a22ae3..cfb064f
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@@ -79,17 -72,16 +79,16 @@@ enum pipe
  struct drm_i915_validate_buffer;
  #endif
  
 -typedef struct _drm_i915_ring_buffer {
 +struct drm_i915_ring_buffer {
  	int tail_mask;
- 	unsigned long Start;
- 	unsigned long End;
  	unsigned long Size;
  	u8 *virtual_start;
  	int head;
  	int tail;
  	int space;
  	drm_local_map_t map;
+ 	struct drm_gem_object *ring_obj;
 -} drm_i915_ring_buffer_t;
 +};
  
  struct mem_block {
  	struct mem_block *next;
@@@ -181,65 -152,7 +180,75 @@@ struct drm_i915_private
  	/* DRI2 sarea */
  	struct drm_buffer_object *sarea_bo;
  	struct drm_bo_kmap_obj sarea_kmap;
+ 
+ 	/* Feature bits from the VBIOS */
+ 	int int_tv_support:1;
+ 	int lvds_dither:1;
+ 	int lvds_vbt:1;
+ 	int int_crt_support:1;
  #endif
+ 
+ 	struct {
+ 		struct drm_memrange gtt_space;
+ 
+ 		/**
+ 		 * List of objects currently involved in rendering from the
+ 		 * ringbuffer.
+ 		 *
+ 		 * A reference is held on the buffer while on this list.
+ 		 */
+ 		struct list_head flushing_list;
+ 
+ 		/**
+ 		 * LRU list of objects which are not in the ringbuffer and
+ 		 * are ready to unbind, but are still in the GTT.
+ 		 *
+ 		 * A reference is not held on the buffer while on this list,
+ 		 * as merely being GTT-bound shouldn't prevent its being
+ 		 * freed, and we'll pull it off the list in the free path.
+ 		 */
+ 		struct list_head inactive_list;
+ 
+ 		/**
+ 		 * List of breadcrumbs associated with GPU requests currently
+ 		 * outstanding.
+ 		 */
+ 		struct list_head request_list;
+ 
+ 		/**
+ 		 * We leave the user IRQ off as much as possible,
+ 		 * but this means that requests will finish and never
+ 		 * be retired once the system goes idle. Set a timer to
+ 		 * fire periodically while the ring is running. When it
+ 		 * fires, go retire requests.
+ 		 */
+ 		struct timer_list retire_timer;
+ 		struct work_struct retire_task;
+ 
+ 		uint32_t next_gem_seqno;
++
++		/**
++		 * Flag if the X Server, and thus DRM, is not currently in
++		 * control of the device.
++		 *
++		 * This is set between LeaveVT and EnterVT. It needs to be
++		 * replaced with a semaphore. It also needs to be
++		 * transitioned away from for kernel modesetting.
++		 */
++		int suspended;
+ 	} mm;
+ 
+ 	struct work_struct user_interrupt_task;
+ 
  	/* Register state */
  	u8 saveLBB;
  	u32 saveDSPACNTR;
@@@ -514,10 -489,8 +529,11 @@@ void i915_gem_lastclose(struct drm_devi
  void i915_gem_retire_requests(struct drm_device *dev);
  void i915_gem_retire_timeout(unsigned long data);
  void i915_gem_retire_handler(struct work_struct *work);
++int i915_gem_init_ringbuffer(struct drm_device *dev);
  #endif
  
 +extern unsigned int i915_fbpercrtc;
 +
  #ifdef __linux__
  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
  extern void intel_init_chipset_flush_compat(struct drm_device *dev);
diff --cc shared-core/i915_init.c
index e13d12f,0000000..ba02f48
mode 100644,000000..100644
--- a/shared-core/i915_init.c
+++ b/shared-core/i915_init.c
@@@ -1,562 -1,0 +1,522 @@@
 +/*
 + * Copyright (c) 2007 Intel Corporation
 + *   Jesse Barnes
 + *
 + * Copyright © 2002, 2003 David Dawes
 + *   2004 Sylvain Meyer
 + *
 + * GPL/BSD dual license
 + */
 +#include "drmP.h"
 +#include "drm.h"
 +#include "drm_sarea.h"
 +#include "i915_drm.h"
 +#include "i915_drv.h"
 +#include "intel_bios.h"
 +#include "intel_drv.h"
 +
 +/**
 + * i915_probe_agp - get AGP bootup configuration
 + * @pdev: PCI device
 + * @aperture_size: returns AGP aperture configured size
 + * @preallocated_size: returns size of BIOS preallocated AGP space
 + *
 + * Since Intel integrated graphics are UMA, the BIOS has to set aside
 + * some RAM for the framebuffer at early boot. This code figures out
 + * how much was set aside so we can use it for our own purposes.
 + */
 +int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
 +		   unsigned long *preallocated_size)
 +{
 +	struct pci_dev *bridge_dev;
 +	u16 tmp = 0;
 +	unsigned long overhead;
 +
 +	bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
 +	if (!bridge_dev) {
 +		DRM_ERROR("bridge device not found\n");
 +		return -1;
 +	}
 +
 +	/* Get the fb aperture size and "stolen" memory amount.
 +	 */
 +	pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
 +	pci_dev_put(bridge_dev);
 +
 +	*aperture_size = 1024 * 1024;
 +	*preallocated_size = 1024 * 1024;
 +
 +	switch (pdev->device) {
 +	case PCI_DEVICE_ID_INTEL_82830_CGC:
 +	case PCI_DEVICE_ID_INTEL_82845G_IG:
 +	case PCI_DEVICE_ID_INTEL_82855GM_IG:
 +	case PCI_DEVICE_ID_INTEL_82865_IG:
 +		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
 +			*aperture_size *= 64;
 +		else
 +			*aperture_size *= 128;
 +		break;
 +	default:
 +		/* 9xx supports large sizes, just look at the length */
 +		*aperture_size = pci_resource_len(pdev, 2);
 +		break;
 +	}
 +
 +	/*
 +	 * Some of the preallocated space is taken by the GTT
 +	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
 +	 */
 +	overhead = (*aperture_size / 1024) + 4096;
 +	switch (tmp & INTEL_855_GMCH_GMS_MASK) {
 +	case INTEL_855_GMCH_GMS_STOLEN_1M:
 +		break; /* 1M already */
 +	case INTEL_855_GMCH_GMS_STOLEN_4M:
 +		*preallocated_size *= 4;
 +		break;
 +	case INTEL_855_GMCH_GMS_STOLEN_8M:
 +		*preallocated_size *= 8;
 +		break;
 +	case INTEL_855_GMCH_GMS_STOLEN_16M:
 +		*preallocated_size *= 16;
 +		break;
 +	case INTEL_855_GMCH_GMS_STOLEN_32M:
 +		*preallocated_size *= 32;
 +		break;
 +	case INTEL_915G_GMCH_GMS_STOLEN_48M:
 +		*preallocated_size *= 48;
 +		break;
 +	case INTEL_915G_GMCH_GMS_STOLEN_64M:
 +		*preallocated_size *= 64;
 +		break;
 +	case INTEL_855_GMCH_GMS_DISABLED:
 +		DRM_ERROR("video memory is disabled\n");
 +		return -1;
 +	default:
 +		DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
 +			  tmp & INTEL_855_GMCH_GMS_MASK);
 +		return -1;
 +	}
 +	*preallocated_size -= overhead;
 +
 +	return 0;
 +}
 +
 +int i915_load_modeset_init(struct drm_device *dev)
 +{
 +	struct drm_i915_private *dev_priv = dev->dev_private;
 +	unsigned long agp_size, prealloc_size;
 +	int size, ret = 0;
 +
 +	i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
 +	printk("setting up %ld bytes of VRAM space\n", prealloc_size);
 +	printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
- 
- 	drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
- 	drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT,
- 		       (agp_size - prealloc_size) >> PAGE_SHIFT, 1);
- 	I915_WRITE(PRB0_CTL, 0);
- 	I915_WRITE(PRB0_HEAD, 0);
- 	I915_WRITE(PRB0_TAIL, 0);
- 
- 	size = PRIMARY_RINGBUFFER_SIZE;
- 	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
- 				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
- 				       DRM_BO_FLAG_MEM_VRAM |
- 				       DRM_BO_FLAG_NO_EVICT,
- 				       DRM_BO_HINT_DONT_FENCE, 0x1, 0,
- 				       &dev_priv->ring_buffer);
- 	if (ret < 0) {
- 		DRM_ERROR("Unable to allocate or pin ring buffer\n");
- 		goto clean_mm;
- 	}
- 
- 	/* remap the buffer object properly */
- 	dev_priv->ring.Start = dev_priv->ring_buffer->offset;
- 	dev_priv->ring.End = dev_priv->ring.Start + size;
- 	dev_priv->ring.Size = size;
- 	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
- 
- 	/* FIXME: need wrapper with PCI mem checks */
- 	ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
- 				  (void **) &dev_priv->ring.virtual_start);
- 	if (ret) {
- 		DRM_ERROR("error mapping ring buffer: %d\n", ret);
- 		goto destroy_ringbuffer;
- 	}
- 
- 	DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
- 		  dev_priv->ring.virtual_start, dev_priv->ring.Size);
- 
- 	memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
- 	I915_WRITE(PRB0_START, dev_priv->ring.Start);
- 	I915_WRITE(PRB0_CTL, ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
- 		   (RING_NO_REPORT | RING_VALID));
++	ret = i915_gem_init_ringbuffer(dev);
++	if (ret)
++		goto out;
 +
 +	/* Allow hardware batchbuffers unless told otherwise.
 +	 */
 +	dev_priv->allow_batchbuffer = 1;
 +	dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
 +	mutex_init(&dev_priv->cmdbuf_mutex);
 +
 +	/* Program Hardware Status Page */
 +	if (!IS_G33(dev)) {
 +		dev_priv->status_page_dmah =
 +			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
 +
 +		if (!dev_priv->status_page_dmah) {
 +			DRM_ERROR("Can not allocate hardware status page\n");
 +			ret = -ENOMEM;
 +			goto destroy_ringbuffer;
 +		}
 +		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
 +		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 +
 +		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 +
 +		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 +	} else {
 +		size = 4 * 1024;
 +		ret = drm_buffer_object_create(dev, size,
 +					       drm_bo_type_kernel,
 +					       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 +					       DRM_BO_FLAG_MEM_VRAM |
 +					       DRM_BO_FLAG_NO_EVICT,
 +					       DRM_BO_HINT_DONT_FENCE, 0x1, 0,
 +					       &dev_priv->hws_bo);
 +		if (ret < 0) {
 +			DRM_ERROR("Unable to allocate or pin hw status page\n");
 +			ret = -EINVAL;
 +			goto destroy_ringbuffer;
 +		}
 +
 +		dev_priv->status_gfx_addr =
 +			dev_priv->hws_bo->offset & (0x1ffff << 12);
 +		dev_priv->hws_map.offset = dev->agp->base +
 +			dev_priv->hws_bo->offset;
 +		dev_priv->hws_map.size = size;
 +		dev_priv->hws_map.type = 0;
 +		dev_priv->hws_map.flags = 0;
 +		dev_priv->hws_map.mtrr = 0;
 +
 +		drm_core_ioremap(&dev_priv->hws_map, dev);
 +		if (dev_priv->hws_map.handle == NULL) {
 +			dev_priv->status_gfx_addr = 0;
 +			DRM_ERROR("can not ioremap virtual addr for "
 +				  "G33 hw status page\n");
 +			ret = -ENOMEM;
 +			goto destroy_hws;
 +		}
 +		dev_priv->hw_status_page = dev_priv->hws_map.handle;
 +		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 +		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 +	}
 +	DRM_DEBUG("Enabled hardware status page\n");
 +
 +	dev_priv->wq = create_singlethread_workqueue("i915");
 +	if (!dev_priv->wq) {
 +		DRM_ERROR("Failed to create i915 workqueue\n");
 +		ret = -EINVAL;
 +		goto destroy_hws;
 +	}
 +
 +	ret = intel_init_bios(dev);
 +	if (ret) {
 +		DRM_ERROR("failed to find VBIOS tables\n");
 +		ret = -ENODEV;
 +		goto destroy_wq;
 +	}
 +
 +	intel_modeset_init(dev);
 +	drm_helper_initial_config(dev, false);
 +
 +	dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
 +	if (!dev->devname) {
 +		ret = -ENOMEM;
 +		goto modeset_cleanup;
 +	}
 +
 +	ret = drm_irq_install(dev);
 +	if (ret) {
 +		kfree(dev->devname);
 +		goto modeset_cleanup;
 +	}
 +	return 0;
 +
 +modeset_cleanup:
 +	intel_modeset_cleanup(dev);
 +destroy_wq:
 +	destroy_workqueue(dev_priv->wq);
 +destroy_hws:
 +	if (!IS_G33(dev)) {
 +		if (dev_priv->status_page_dmah)
 +			drm_pci_free(dev, dev_priv->status_page_dmah);
 +	} else {
 +		if (dev_priv->hws_map.handle)
 +			drm_core_ioremapfree(&dev_priv->hws_map, dev);
 +		if (dev_priv->hws_bo)
 +			drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
 +	}
 +	I915_WRITE(HWS_PGA, 0x1ffff000);
 +destroy_ringbuffer:
 +	if (dev_priv->ring.virtual_start)
 +		drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
 +				    dev_priv->ring.virtual_start);
 +	if (dev_priv->ring_buffer)
 +		drm_bo_usage_deref_unlocked(&dev_priv->ring_buffer);
- clean_mm:
- 	drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1);
- 	drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1);
++out:
 +	return ret;
 +}
 +
 +/**
 + * i915_driver_load - setup chip and create an initial config
 + * @dev: DRM device
 + * @flags: startup flags
 + *
 + * The driver load routine has to do several things:
 + *   - drive output discovery via intel_modeset_init()
 + *   - initialize the memory manager
 + *   - allocate initial config memory
 + *   - setup the DRM framebuffer with the allocated memory
 + */
 +int i915_driver_load(struct drm_device *dev, unsigned long flags)
 +{
 +	struct drm_i915_private *dev_priv;
 +	int ret = 0;
 +
 +	dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
 +	if (dev_priv == NULL)
 +		return -ENOMEM;
 +
 +	memset(dev_priv, 0, sizeof(struct drm_i915_private));
 +	dev->dev_private = (void *)dev_priv;
 +	dev_priv->dev = dev;
 +
 +	/* i915 has 4 more counters */
 +	dev->counters += 4;
 +	dev->types[6] = _DRM_STAT_IRQ;
 +	dev->types[7] = _DRM_STAT_PRIMARY;
 +	dev->types[8] = _DRM_STAT_SECONDARY;
 +	dev->types[9] = _DRM_STAT_DMA;
 +
 +	if (IS_MOBILE(dev) || IS_I9XX(dev))
 +		dev_priv->cursor_needs_physical = true;
 +	else
 +		dev_priv->cursor_needs_physical = false;
 +
 +	if (IS_I965G(dev) || IS_G33(dev))
 +		dev_priv->cursor_needs_physical = false;
 +
 +	if (IS_I9XX(dev)) {
 +		pci_read_config_dword(dev->pdev, 0x5C, &dev_priv->stolen_base);
 +		DRM_DEBUG("stolen base %p\n", (void *)dev_priv->stolen_base);
 +	}
 +
 +	if (IS_I9XX(dev)) {
 +		dev_priv->mmiobase = drm_get_resource_start(dev, 0);
 +		dev_priv->mmiolen = drm_get_resource_len(dev, 0);
 +		dev->mode_config.fb_base =
 +			drm_get_resource_start(dev, 2) & 0xff000000;
 +	} else if (drm_get_resource_start(dev, 1)) {
 +		dev_priv->mmiobase = drm_get_resource_start(dev, 1);
 +		dev_priv->mmiolen = drm_get_resource_len(dev, 1);
 +		dev->mode_config.fb_base =
 +			drm_get_resource_start(dev, 0) & 0xff000000;
 +	} else {
 +		DRM_ERROR("Unable to find MMIO registers\n");
 +		ret = -ENODEV;
 +		goto free_priv;
 +	}
 +
 +	DRM_DEBUG("fb_base: 0x%08lx\n", dev->mode_config.fb_base);
 +
 +	ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
 +			 _DRM_REGISTERS, _DRM_KERNEL|_DRM_READ_ONLY|_DRM_DRIVER,
 +			 &dev_priv->mmio_map);
 +	if (ret != 0) {
 +		DRM_ERROR("Cannot add mapping for MMIO registers\n");
 +		goto free_priv;
 +	}
 +
 +	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 +	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 +	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 +	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 +	dev_priv->mm.retire_timer.function = i915_gem_retire_timeout;
 +	dev_priv->mm.retire_timer.data = (unsigned long) dev;
 +	init_timer_deferrable(&dev_priv->mm.retire_timer);
 +	INIT_WORK(&dev_priv->mm.retire_task,
 +		  i915_gem_retire_handler);
 +	INIT_WORK(&dev_priv->user_interrupt_task,
 +		  i915_user_interrupt_handler);
 +	dev_priv->mm.next_gem_seqno = 1;
 +
 +#ifdef __linux__
 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 +	intel_init_chipset_flush_compat(dev);
 +#endif
 +#endif
 +
 +	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 +		/*
 +		 * Initialize the memory manager for local and AGP space
 +		 */
 +		ret = drm_bo_driver_init(dev);
 +		if (ret) {
 +			DRM_ERROR("failed to init memory manager for "
 +				  "local & AGP space\n");
 +			goto out_rmmap;
 +		}
 +
 +		ret = i915_load_modeset_init(dev);
 +		if (ret < 0) {
 +			DRM_ERROR("failed to init modeset\n");
 +			goto driver_fini;
 +		}
 +	}
 +	return 0;
 +
 +driver_fini:
 +	drm_bo_driver_finish(dev);
 +out_rmmap:
 +	drm_rmmap(dev, dev_priv->mmio_map);
 +free_priv:
 +	drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
 +	return ret;
 +}
 +
 +int i915_driver_unload(struct drm_device *dev)
 +{
 +	struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +	I915_WRITE(PRB0_CTL, 0);
 +
 +	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 +		drm_irq_uninstall(dev);
 +		intel_modeset_cleanup(dev);
 +		destroy_workqueue(dev_priv->wq);
 +	}
 +
 +#if 0
 +	if (dev_priv->ring.virtual_start) {
 +		drm_core_ioremapfree(&dev_priv->ring.map, dev);
 +	}
 +#endif
 +	if (dev_priv->sarea_kmap.virtual) {
 +		drm_bo_kunmap(&dev_priv->sarea_kmap);
 +		dev_priv->sarea_kmap.virtual = NULL;
 +		dev->sigdata.lock = NULL;
 +	}
 +
 +	if (dev_priv->sarea_bo) {
 +		mutex_lock(&dev->struct_mutex);
 +		drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
 +		mutex_unlock(&dev->struct_mutex);
 +		dev_priv->sarea_bo = NULL;
 +	}
 +
 +	if (dev_priv->status_page_dmah) {
 +		drm_pci_free(dev, dev_priv->status_page_dmah);
 +		dev_priv->status_page_dmah = NULL;
 +		dev_priv->hw_status_page = NULL;
 +		dev_priv->dma_status_page = 0;
 +		/* Need to rewrite hardware status page */
 +		I915_WRITE(HWS_PGA, 0x1ffff000);
 +	}
 +
 +	if (dev_priv->status_gfx_addr) {
 +		dev_priv->status_gfx_addr = 0;
 +		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 +		drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
 +		I915_WRITE(HWS_PGA, 0x1ffff000);
 +	}
 +
 +	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 +		drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
 +				    dev_priv->ring.virtual_start);
 +
 +		DRM_DEBUG("usage is %d\n",
 +			  atomic_read(&dev_priv->ring_buffer->usage));
 +		mutex_lock(&dev->struct_mutex);
 +		drm_bo_usage_deref_locked(&dev_priv->ring_buffer);
 +
 +		if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
 +			DRM_ERROR("Memory manager type TT not clean. "
 +				  "Delaying takedown\n");
 +		}
 +		if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
 +			DRM_ERROR("Memory manager type VRAM not clean. "
 +				  "Delaying takedown\n");
 +		}
 +		mutex_unlock(&dev->struct_mutex);
 +	}
 +
 +	drm_bo_driver_finish(dev);
 +
 +#ifdef __linux__
 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
 +	intel_fini_chipset_flush_compat(dev);
 +#endif
 +#endif
 +
 +	DRM_DEBUG("%p\n", dev_priv->mmio_map);
 +	drm_rmmap(dev, dev_priv->mmio_map);
 +
 +	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
 +
 +	dev->dev_private = NULL;
 +	return 0;
 +}
 +
 +int i915_master_create(struct drm_device *dev, struct drm_master *master)
 +{
 +	struct drm_i915_master_private *master_priv;
 +	unsigned long sareapage;
 +	int ret;
 +
 +	master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
 +	if (!master_priv)
 +		return -ENOMEM;
 +
 +	/* prebuild the SAREA */
 +	sareapage = max(SAREA_MAX, PAGE_SIZE);
 +	ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
 +			 _DRM_CONTAINS_LOCK|_DRM_DRIVER,
 +			 &master_priv->sarea);
 +	if (ret) {
 +		DRM_ERROR("SAREA setup failed\n");
 +		drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
 +		return ret;
 +	}
 +	master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
 +	master_priv->sarea_priv->pf_current_page = 0;
 +
 +	master->driver_priv = master_priv;
 +	return 0;
 +}
 +
 +void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 +{
 +	struct drm_i915_master_private *master_priv = master->driver_priv;
 +
 +	if (!master_priv)
 +		return;
 +
 +	if (master_priv->sarea)
 +		drm_rmmap(dev, master_priv->sarea);
 +
 +	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
 +
 +	master->driver_priv = NULL;
 +}
 +
 +void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 +{
 +	struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 +		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 +}
 +
 +void i915_driver_lastclose(struct drm_device * dev)
 +{
 +	struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +	if (drm_core_check_feature(dev, DRIVER_MODESET))
 +		return;
 +
 +#ifdef I915_HAVE_BUFFER
 +	if (dev_priv->val_bufs) {
 +		vfree(dev_priv->val_bufs);
 +		dev_priv->val_bufs = NULL;
 +	}
 +#endif
 +
 +	i915_gem_lastclose(dev);
 +
 +	if (dev_priv->agp_heap)
 +		i915_mem_takedown(&(dev_priv->agp_heap));
 +
 +#if defined(I915_HAVE_BUFFER)
 +	if (dev_priv->sarea_kmap.virtual) {
 +		drm_bo_kunmap(&dev_priv->sarea_kmap);
 +		dev_priv->sarea_kmap.virtual = NULL;
 +		dev->control->master->lock.hw_lock = NULL;
 +		dev->sigdata.lock = NULL;
 +	}
 +
 +	if (dev_priv->sarea_bo) {
 +		mutex_lock(&dev->struct_mutex);
 +		drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
 +		mutex_unlock(&dev->struct_mutex);
 +		dev_priv->sarea_bo = NULL;
 +	}
 +#endif
 +
 +	i915_dma_cleanup(dev);
 +}
 +
 +int i915_driver_firstopen(struct drm_device *dev)
 +{
 +	if (drm_core_check_feature(dev, DRIVER_MODESET))
 +		return 0;
 +
 +	drm_bo_driver_init(dev);
 +	return 0;
 +}
diff --cc shared-core/i915_irq.c
index d36a369,0125f8b..bd11d37
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@@ -664,20 -556,15 +664,20 @@@ void i915_user_irq_on(struct drm_devic
  	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
  }
  
 -void i915_user_irq_off(drm_i915_private_t *dev_priv)
 +void i915_user_irq_off(struct drm_device *dev)
  {
 +	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 +
  	DRM_SPINLOCK(&dev_priv->user_irq_lock);
 -	BUG_ON(dev_priv->user_irq_refcount <= 0);
 +	BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
  	if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
  		dev_priv->irq_mask_reg |= I915_USER_INTERRUPT;
 -		I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
 -		(void) I915_READ(I915REG_INT_MASK_R);
 +		if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
 +			I915_WRITE(IMR, dev_priv->irq_mask_reg);
 +		else
 +			I915_WRITE16(IMR, dev_priv->irq_mask_reg);
 +		I915_READ16(IMR);
  	}
  	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
  }
@@@ -685,10 -572,14 +685,15 @@@
  int i915_wait_irq(struct drm_device * dev, int irq_nr)
  {
 -	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 +	struct drm_i915_master_private *master_priv;
  	int ret = 0;
  
+ 	if (!dev_priv) {
+ 		DRM_ERROR("called with no initialization\n");
+ 		return -EINVAL;
+ 	}
+ 
  	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
  		  READ_BREADCRUMB(dev_priv));
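
Taken together, this merge replaces the old drm_buffer_object-based ring setup with a ring that lives in a pinned GEM object. As a reading aid, here is a minimal sketch of the control flow the hunks above establish (distilled from the diff; error handling and locking elided, so treat it as an outline rather than compilable code):

	/* Load path for modesetting (shared-core/i915_init.c):
	 *
	 *   i915_driver_load()
	 *     -> drm_bo_driver_init()
	 *     -> i915_load_modeset_init()
	 *          -> i915_gem_init_ringbuffer()   // ring buffer is now a GEM object
	 *
	 * VT switch (linux-core/i915_gem.c):
	 *
	 *   i915_gem_entervt_ioctl()
	 *     -> i915_gem_init_ringbuffer(); mm.suspended = 0;
	 *
	 *   i915_gem_leavevt_ioctl()
	 *     -> mm.suspended = 1;
	 *     -> evict active/flushing/inactive lists
	 *     -> wait for mm.request_list to drain
	 *     -> i915_gem_cleanup_ringbuffer()
	 */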