Lots of conflicts, seems to load ok, but I'm sure some bugs snuck in.
Conflicts:
linux-core/drmP.h
linux-core/drm_lock.c
linux-core/i915_gem.c
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/i915_irq.c
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
--
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CREATE, drm_gem_create_ioctl, 0),
-- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_PREAD, drm_gem_pread_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_PWRITE, drm_gem_pwrite_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_MMAP, drm_gem_mmap_ioctl, 0),
-- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
-- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_SET_DOMAIN, drm_gem_set_domain_ioctl, DRM_AUTH),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
EXPORT_SYMBOL(drm_gem_object_alloc);
/**
-- * Removes the mapping from handle to filp for this object.
-- */
--static int
--drm_gem_handle_delete(struct drm_file *filp, int handle)
--{
-- struct drm_device *dev;
-- struct drm_gem_object *obj;
--
-- /* This is gross. The idr system doesn't let us try a delete and
-- * return an error code. It just spews if you fail at deleting.
-- * So, we have to grab a lock around finding the object and then
-- * doing the delete on it and dropping the refcount, or the user
-- * could race us to double-decrement the refcount and cause a
-- * use-after-free later. Given the frequency of our handle lookups,
-- * we may want to use ida for number allocation and a hash table
-- * for the pointers, anyway.
-- */
-- spin_lock(&filp->table_lock);
--
-- /* Check if we currently have a reference on the object */
-- obj = idr_find(&filp->object_idr, handle);
-- if (obj == NULL) {
-- spin_unlock(&filp->table_lock);
-- return -EINVAL;
-- }
-- dev = obj->dev;
--
-- /* Release reference and decrement refcount. */
-- idr_remove(&filp->object_idr, handle);
-- spin_unlock(&filp->table_lock);
--
-- mutex_lock(&dev->struct_mutex);
-- drm_gem_object_handle_unreference(obj);
-- mutex_unlock(&dev->struct_mutex);
--
-- return 0;
--}
--
--/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
- * Creates a new mm object and returns a handle to it.
- */
- int
- drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
- struct drm_gem_create *args = data;
- struct drm_gem_object *obj;
- int handle, ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- args->size = roundup(args->size, PAGE_SIZE);
-
- /* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
- if (obj == NULL)
- return -ENOMEM;
-
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- if (ret)
- return ret;
-
- args->handle = handle;
-
- return 0;
- }
-
- /**
-- * Releases the handle to an mm object.
-- */
--int
--drm_gem_close_ioctl(struct drm_device *dev, void *data,
-- struct drm_file *file_priv)
--{
-- struct drm_gem_close *args = data;
-- int ret;
--
-- if (!(dev->driver->driver_features & DRIVER_GEM))
-- return -ENODEV;
--
-- ret = drm_gem_handle_delete(file_priv, args->handle);
--
-- return ret;
- }
-
- /**
- * Reads data from the object referenced by handle.
- *
- * On error, the contents of *data are undefined.
- */
- int
- drm_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
- struct drm_gem_pread *args = data;
- struct drm_gem_object *obj;
- ssize_t read;
- loff_t offset;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->gem_set_domain) {
- ret = dev->driver->gem_set_domain(obj, file_priv,
- DRM_GEM_DOMAIN_CPU,
- 0);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
- offset = args->offset;
-
- read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
- args->size, &offset);
- if (read != args->size) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (read < 0)
- return read;
- else
- return -EINVAL;
- }
-
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
- }
-
- /**
- * Maps the contents of an object, returning the address it is mapped
- * into.
- *
- * While the mapping holds a reference on the contents of the object, it doesn't
- * imply a ref on the object itself.
- */
- int
- drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
- struct drm_gem_mmap *args = data;
- struct drm_gem_object *obj;
- loff_t offset;
- unsigned long addr;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- offset = args->offset;
-
- down_write(&current->mm->mmap_sem);
- addr = do_mmap(obj->filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- args->offset);
- up_write(&current->mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (IS_ERR((void *)addr))
- return addr;
-
- args->addr_ptr = (uint64_t) addr;
-
- return 0;
- }
-
- /**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
- int
- drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
- struct drm_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- ssize_t written;
- loff_t offset;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->gem_set_domain) {
- ret = dev->driver->gem_set_domain(obj, file_priv,
- DRM_GEM_DOMAIN_CPU,
- DRM_GEM_DOMAIN_CPU);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
- offset = args->offset;
-
- written = vfs_write(obj->filp,
- (char __user *)(uintptr_t) args->data_ptr,
- args->size, &offset);
-
- if (written != args->size) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (written < 0)
- return written;
- else
- return -EINVAL;
- }
-
- if (dev->driver->gem_flush_pwrite)
- dev->driver->gem_flush_pwrite(obj,
- args->offset,
- args->size);
-
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
--}
--
--/**
-- * Create a global name for an object, returning the name.
-- *
-- * Note that the name does not hold a reference; when the object
-- * is freed, the name goes away.
-- */
--int
--drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-- struct drm_file *file_priv)
--{
-- struct drm_gem_flink *args = data;
-- struct drm_gem_object *obj;
-- int ret;
--
-- if (!(dev->driver->driver_features & DRIVER_GEM))
-- return -ENODEV;
--
-- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-- if (obj == NULL)
-- return -EINVAL;
--
--again:
-- if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
-- return -ENOMEM;
--
-- spin_lock(&dev->object_name_lock);
-- if (obj->name) {
-- spin_unlock(&dev->object_name_lock);
-- return -EEXIST;
-- }
-- ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-- &obj->name);
-- spin_unlock(&dev->object_name_lock);
-- if (ret == -EAGAIN)
-- goto again;
--
-- if (ret != 0) {
-- mutex_lock(&dev->struct_mutex);
-- drm_gem_object_unreference(obj);
-- mutex_unlock(&dev->struct_mutex);
-- return ret;
-- }
--
-- /*
-- * Leave the reference from the lookup around as the
-- * name table now holds one
-- */
-- args->name = (uint64_t) obj->name;
--
-- return 0;
--}
--
--/**
-- * Open an object using the global name, returning a handle and the size.
-- *
-- * This handle (of course) holds a reference to the object, so the object
-- * will not go away until the handle is deleted.
-- */
--int
--drm_gem_open_ioctl(struct drm_device *dev, void *data,
-- struct drm_file *file_priv)
--{
-- struct drm_gem_open *args = data;
-- struct drm_gem_object *obj;
-- int ret;
-- int handle;
--
-- if (!(dev->driver->driver_features & DRIVER_GEM))
-- return -ENODEV;
--
-- spin_lock(&dev->object_name_lock);
-- obj = idr_find(&dev->object_name_idr, (int) args->name);
-- if (obj)
-- drm_gem_object_reference(obj);
-- spin_unlock(&dev->object_name_lock);
-- if (!obj)
-- return -ENOENT;
--
-- ret = drm_gem_handle_create(file_priv, obj, &handle);
-- mutex_lock(&dev->struct_mutex);
-- drm_gem_object_unreference(obj);
-- mutex_unlock(&dev->struct_mutex);
-- if (ret)
-- return ret;
--
-- args->handle = handle;
-- args->size = obj->size;
--
-- return 0;
- }
-
- /**
- * Called when user space prepares to use an object
- */
- int
- drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
- struct drm_gem_set_domain *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->gem_set_domain) {
- ret = dev->driver->gem_set_domain(obj, file_priv,
- args->read_domains,
- args->write_domain);
- } else {
- obj->read_domains = args->read_domains;
- obj->write_domain = args->write_domain;
- ret = 0;
- }
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
--}
--
--/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
case DRM_INST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != dev->irq)
+ ctl->irq != dev->pdev->irq)
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
}
EXPORT_SYMBOL(drm_idlelock_release);
- /**
- * Takes the lock on behalf of the client if needed, using the kernel context.
- *
- * This allows us to hide the hardware lock when it's required for protection
- * of data structures (such as command ringbuffer) shared with the X Server, and
- * a way for us to transition to lockless for those requests when the X Server
- * stops accessing the ringbuffer directly, without having to update the
- * other userland clients.
- */
- int drm_client_lock_take(struct drm_device *dev, struct drm_file *file_priv)
- {
- struct drm_master *master = file_priv->master;
- int ret;
- unsigned long irqflags;
-
- /* If the client has the lock, we're already done. */
- if (drm_i_have_hw_lock(dev, file_priv))
- return 0;
-
- mutex_unlock (&dev->struct_mutex);
- /* Client doesn't hold the lock. Block taking the lock with the kernel
- * context on behalf of the client, and return whether we were
- * successful.
- */
- spin_lock_irqsave(&master->lock.spinlock, irqflags);
- master->lock.user_waiters++;
- spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
- ret = wait_event_interruptible(master->lock.lock_queue,
- drm_lock_take(&master->lock,
- DRM_KERNEL_CONTEXT));
- spin_lock_irqsave(&master->lock.spinlock, irqflags);
- master->lock.user_waiters--;
- if (ret != 0) {
- spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
- } else {
- master->lock.file_priv = file_priv;
- master->lock.lock_time = jiffies;
- master->lock.kernel_held = 1;
- file_priv->lock_count++;
- spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
- }
- mutex_lock (&dev->struct_mutex);
- return ret;
- }
- EXPORT_SYMBOL(drm_client_lock_take);
-
- void drm_client_lock_release(struct drm_device *dev, struct drm_file *file_priv)
- {
- struct drm_master *master = file_priv->master;
-
- if (master->lock.kernel_held) {
- master->lock.kernel_held = 0;
- master->lock.file_priv = NULL;
- drm_lock_free(&master->lock, DRM_KERNEL_CONTEXT);
- }
- }
- EXPORT_SYMBOL(drm_client_lock_release);
-
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
-
- return (file_priv->lock_count && dev->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.file_priv == file_priv);
+ struct drm_master *master = file_priv->master;
+ return (file_priv->lock_count && master->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+ master->lock.file_priv == file_priv);
}
EXPORT_SYMBOL(drm_i_have_hw_lock);
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
- .remove = __devexit_p(drm_cleanup_pci),
+ .remove = remove,
},
-#ifdef I915_HAVE_FENCE
+#if defined(I915_HAVE_FENCE) && defined(I915_TTM)
.fence_driver = &i915_fence_driver,
#endif
-#ifdef I915_HAVE_BUFFER
+#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
.bo_driver = &i915_bo_driver,
#endif
.name = DRIVER_NAME,
i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
+ int
+ i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
-int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_init *args = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- mutex_lock(&dev->struct_mutex);
-
- if (args->gtt_start >= args->gtt_end ||
- (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
- (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
- mutex_unlock(&dev->struct_mutex);
+ if (start >= end ||
+ (start & (PAGE_SIZE - 1)) != 0 ||
+ (end & (PAGE_SIZE - 1)) != 0) {
return -EINVAL;
}
- drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
- args->gtt_end - args->gtt_start);
+ drm_memrange_init(&dev_priv->mm.gtt_space, start,
+ end - start);
+
++ dev->gtt_total = (uint32_t) (end - start);
++
+ return 0;
+}
- dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_init *args = data;
+ int ret;
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
+
+ /**
+ * Creates a new mm object and returns a handle to it.
+ */
+ int
+ i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_create *args = data;
+ struct drm_gem_object *obj;
+ int handle, ret;
+
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ /* Allocate the new object */
+ obj = drm_gem_object_alloc(dev, args->size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+ }
+
+ /**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+ int
+ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_pread *args = data;
+ struct drm_gem_object *obj;
+ ssize_t read;
+ loff_t offset;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_CPU, 0);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ offset = args->offset;
+
+ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+ args->size, &offset);
+ if (read != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (read < 0)
+ return read;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+
+ /**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+ int
+ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_gem_object *obj;
+ ssize_t written;
+ loff_t offset;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ offset = args->offset;
+
+ written = vfs_write(obj->filp,
+ (char __user *)(uintptr_t) args->data_ptr,
+ args->size, &offset);
+
+ if (written != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (written < 0)
+ return written;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+
+ /**
+ * Called when user space prepares to use an object
+ */
+ int
+ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_set_domain *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ args->read_domains, args->write_domain);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+ int
+ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_mmap *args = data;
+ struct drm_gem_object *obj;
+ loff_t offset;
+ unsigned long addr;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ offset = args->offset;
+
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap(obj->filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+ up_write(&current->mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR((void *)addr))
+ return addr;
+
+ args->addr_ptr = (uint64_t) addr;
+
+ return 0;
+ }
+
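
[Editor's note: the hunks above add the i915-private create/pread/pwrite/set_domain/mmap ioctls, but no caller is shown in this diff. Below is a minimal userspace sketch of the intended flow (create, upload via pwrite, declare CPU access, then mmap). The DRM_IOCTL_I915_GEM_* request macros and the struct drm_i915_gem_* layouts are assumed from shared-core/i915_drm.h, which is not part of this hunk; error handling is omitted.]

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Sketch only: create a GEM object, fill it with pwrite, map it for CPU reads. */
static void *create_and_fill(int fd, const void *data, uint64_t len)
{
	struct drm_i915_gem_create create = { .size = len };
	struct drm_i915_gem_pwrite pwrite;
	struct drm_i915_gem_set_domain sd;
	struct drm_i915_gem_mmap mmap_arg;

	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = create.handle;
	pwrite.offset = 0;
	pwrite.size = len;
	pwrite.data_ptr = (uint64_t)(uintptr_t)data;
	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);

	/* Declare CPU-domain access before reading the contents back. */
	memset(&sd, 0, sizeof(sd));
	sd.handle = create.handle;
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = 0;
	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = create.handle;
	mmap_arg.offset = 0;
	mmap_arg.size = create.size;	/* the create ioctl rounded this up to a page multiple */
	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);

	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}
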
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
list_del_init(&obj_priv->list);
else
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
uint32_t seqno;
+ int was_empty;
RING_LOCALS;
request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
}
void
- i915_gem_retire_timeout(unsigned long data)
- {
- struct drm_device *dev = (struct drm_device *) data;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- schedule_work(&dev_priv->mm.retire_task);
- }
-
- void
- i915_gem_retire_handler(struct work_struct *work)
+ i915_gem_retire_work_handler(struct work_struct *work)
{
- drm_i915_private_t *dev_priv;
+ struct drm_i915_private *dev_priv;
struct drm_device *dev;
- dev_priv = container_of(work, drm_i915_private_t,
+ dev_priv = container_of(work, struct drm_i915_private,
- mm.retire_task);
+ mm.retire_work.work);
dev = dev_priv->dev;
mutex_lock(&dev->struct_mutex);
ret = wait_event_interruptible(dev_priv->irq_queue,
i915_seqno_passed(i915_get_gem_seqno(dev),
seqno));
- i915_user_irq_off(dev_priv);
+ i915_user_irq_off(dev);
}
if (ret)
- DRM_ERROR ("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, i915_get_gem_seqno(dev));
+ DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+ __func__, ret, seqno, i915_get_gem_seqno(dev));
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
* are flushed at any MI_FLUSH.
*/
- cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
if ((invalidate_domains|flush_domains) &
- DRM_GEM_DOMAIN_I915_RENDER)
+ I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
if (!IS_I965G(dev)) {
/*
static int
i915_gem_evict_something(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret;
+ int ret = 0;
for (;;) {
/* If there's an inactive buffer available now, grab it
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = obj->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ i915_verify_inactive(dev, __FILE__, __LINE__);
obj_priv->pin_count--;
+ BUG_ON(obj_priv->pin_count < 0);
+ BUG_ON(obj_priv->gtt_space == NULL);
+
+ /* If the object is no longer pinned, and is
+ * neither active nor being flushed, then stick it on
+ * the inactive list
+ */
+ if (obj_priv->pin_count == 0) {
+ if (!obj_priv->active && (obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0)
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.inactive_list);
+ atomic_dec(&dev->pin_count);
+ atomic_sub(obj->size, &dev->pin_memory);
+ }
+ i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t seqno;
+ int ret;
mutex_lock(&dev->struct_mutex);
/* Hack! Don't let anybody do execbuf while we don't control the chip.
uint64_t p_size;
};
- struct drm_gem_create {
- /**
- * Requested size for the object.
- *
- * The (page-aligned) allocated size for the object will be returned.
- */
- uint64_t size;
- /**
- * Returned handle for the object.
- *
- * Object handles are nonzero.
- */
- uint32_t handle;
- uint32_t pad;
- };
-
--struct drm_gem_close {
-- /** Handle of the object to be closed. */
- uint32_t handle;
- uint32_t pad;
- };
-
- struct drm_gem_pread {
- /** Handle for the object being read. */
- uint32_t handle;
- uint32_t pad;
- /** Offset into the object to read from */
- uint64_t offset;
- /** Length of data to read */
- uint64_t size;
- /** Pointer to write the data into. */
- uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
- };
-
- struct drm_gem_pwrite {
- /** Handle for the object being written to. */
- uint32_t handle;
- uint32_t pad;
- /** Offset into the object to write to */
- uint64_t offset;
- /** Length of data to write */
- uint64_t size;
- /** Pointer to read the data from. */
- uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
- };
-
- struct drm_gem_mmap {
- /** Handle for the object being mapped. */
- uint32_t handle;
- uint32_t pad;
- /** Offset in the object to map. */
- uint64_t offset;
- /**
- * Length of data to map.
- *
- * The value will be page-aligned.
- */
- uint64_t size;
- /** Returned pointer the data was mapped at */
- uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
- };
-
- struct drm_gem_flink {
- /** Handle for the object being named */
- uint32_t handle;
- /** Returned global name */
- uint32_t name;
- };
-
- struct drm_gem_open {
- /** Name of object being opened */
- uint32_t name;
- /** Returned handle for the object */
- uint32_t handle;
- /** Returned size of the object */
- uint64_t size;
- };
-
+struct drm_gem_set_domain {
+ /** Handle for the object */
uint32_t handle;
- uint32_t pad;
+ /** New read domains */
+ uint32_t read_domains;
+ /** New write domain */
+ uint32_t write_domain;
};
+#define DRM_GEM_DOMAIN_CPU 0x00000001
-struct drm_gem_flink {
- /** Handle for the object being named */
- uint32_t handle;
+/*
+ * Drm mode setting
+ */
+#define DRM_DISPLAY_INFO_LEN 32
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN (1<<0)
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_PREFERRED (1<<3)
+#define DRM_MODE_TYPE_DEFAULT (1<<4)
+#define DRM_MODE_TYPE_USERDEF (1<<5)
+#define DRM_MODE_TYPE_DRIVER (1<<6)
+
+struct drm_mode_modeinfo {
+ unsigned int clock;
+ unsigned short hdisplay, hsync_start, hsync_end, htotal, hskew;
+ unsigned short vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+ unsigned int vrefresh; /* vertical refresh * 1000 */
- /** Returned global name */
- uint32_t name;
+ unsigned int flags;
+ unsigned int type;
+ char name[DRM_DISPLAY_MODE_LEN];
};
-struct drm_gem_open {
- /** Name of object being opened */
- uint32_t name;
+struct drm_mode_card_res {
+ uint64_t fb_id_ptr;
+ uint64_t crtc_id_ptr;
+ uint64_t connector_id_ptr;
+ uint64_t encoder_id_ptr;
+ int count_fbs;
+ int count_crtcs;
+ int count_connectors;
+ int count_encoders;
+ int min_width, max_width;
+ int min_height, max_height;
+ uint32_t generation;
+};
- /** Returned handle for the object */
- uint32_t handle;
-
- /** Returned size of the object */
- uint64_t size;
+struct drm_mode_crtc {
+ uint64_t set_connectors_ptr;
+
+ unsigned int crtc_id; /**< Id */
+ unsigned int fb_id; /**< Id of framebuffer */
+
+ int x, y; /**< Position on the framebuffer */
+
+ uint32_t generation;
+
+ int count_connectors;
+ unsigned int connectors; /**< Connectors that are connected */
+
+ int count_possibles;
+ unsigned int possibles; /**< Connectors that can be connected */
+ uint32_t gamma_size;
+ int mode_valid;
+ struct drm_mode_modeinfo mode;
+};
+
+struct drm_mode_get_encoder {
+
+ uint32_t generation;
+
+ uint32_t encoder_type;
+ uint32_t encoder_id;
+
+ unsigned int crtc; /**< Id of crtc */
+ uint32_t crtcs;
+ uint32_t clones;
+};
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
+
+struct drm_mode_get_connector {
+
+ uint64_t encoders_ptr;
+ uint64_t modes_ptr;
+ uint64_t props_ptr;
+ uint64_t prop_values_ptr;
+
+ int count_modes;
+ int count_props;
+ int count_encoders;
+
+ unsigned int encoder; /**< Current Encoder */
+ unsigned int connector; /**< Id */
+ unsigned int connector_type;
+ unsigned int connector_type_id;
+
+ uint32_t generation;
+
+ unsigned int connection;
+ unsigned int mm_width, mm_height; /**< width, height in millimeters */
+ unsigned int subpixel;
+};
+
+#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) // enumerated type with text strings
+#define DRM_MODE_PROP_BLOB (1<<4)
+
+struct drm_mode_property_enum {
+ uint64_t value;
+ unsigned char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_mode_get_property {
+ uint64_t values_ptr; /* values and blob lengths */
+ uint64_t enum_blob_ptr; /* enum and blob id ptrs */
+
+ unsigned int prop_id;
+ unsigned int flags;
+ unsigned char name[DRM_PROP_NAME_LEN];
+
+ int count_values;
+ int count_enum_blobs;
+};
+
+struct drm_mode_connector_set_property {
+ uint64_t value;
+ unsigned int prop_id;
+ unsigned int connector_id;
+};
+
+struct drm_mode_get_blob {
+ uint32_t blob_id;
+ uint32_t length;
+ uint64_t data;
+};
+
+struct drm_mode_fb_cmd {
+ unsigned int buffer_id;
+ unsigned int width, height;
+ unsigned int pitch;
+ unsigned int bpp;
+ unsigned int handle;
+ unsigned int depth;
+};
+
+struct drm_mode_mode_cmd {
+ unsigned int connector_id;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_CURSOR_BO 0x01
+#define DRM_MODE_CURSOR_MOVE 0x02
+
+/*
+ * Depending on the value in flags, different members are used.
+ *
+ * CURSOR_BO uses
+ * crtc
+ * width
+ * height
+ * handle - if 0, turns the cursor off
+ *
+ * CURSOR_MOVE uses
+ * crtc
+ * x
+ * y
+ */
+struct drm_mode_cursor {
+ unsigned int flags;
+ unsigned int crtc;
+ int x;
+ int y;
+ uint32_t width;
+ uint32_t height;
+ unsigned int handle;
+};
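
[Editor's note: a short sketch of the two flag cases described in the comment above struct drm_mode_cursor. The DRM_IOCTL_MODE_CURSOR request macro is assumed to be defined alongside these structures and is not shown in this hunk; crtc_id and bo_handle are caller-supplied values.]

#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* CURSOR_BO case: attach a buffer object as the cursor image. */
static int show_cursor(int fd, unsigned int crtc_id, unsigned int bo_handle)
{
	struct drm_mode_cursor arg;

	memset(&arg, 0, sizeof(arg));
	arg.flags = DRM_MODE_CURSOR_BO;
	arg.crtc = crtc_id;
	arg.width = 64;
	arg.height = 64;
	arg.handle = bo_handle;	/* a handle of 0 turns the cursor off */
	return ioctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
}

/* CURSOR_MOVE case: only crtc, x and y are used. */
static int move_cursor(int fd, unsigned int crtc_id, int x, int y)
{
	struct drm_mode_cursor arg;

	memset(&arg, 0, sizeof(arg));
	arg.flags = DRM_MODE_CURSOR_MOVE;
	arg.crtc = crtc_id;
	arg.x = x;
	arg.y = y;
	return ioctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
}
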
+
+/*
+ * oh so ugly hotplug
+ */
+struct drm_mode_hotplug {
+ uint32_t counter;
+};
+
+struct drm_mode_crtc_lut {
+
+ uint32_t crtc_id;
+ uint32_t gamma_size;
+
+ uint64_t red;
+ uint64_t green;
+ uint64_t blue;
};
/**
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
- if (dev->irq)
+ if (dev->irq_enabled)
drm_irq_uninstall(dev);
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = 0;
- dev_priv->ring.map.handle = 0;
- dev_priv->ring.map.size = 0;
- }
-
- if (I915_NEED_GFX_HWS(dev))
- i915_free_hardware_status(dev);
+ if (dev_priv->ring.virtual_start) {
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+ dev_priv->ring.virtual_start = 0;
+ dev_priv->ring.map.handle = 0;
+ dev_priv->ring.map.size = 0;
+ dev_priv->ring.Size = 0;
+ }
+
+ if (dev_priv->status_page_dmah) {
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ /* Need to rewrite hardware status page */
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
+
+ if (dev_priv->hws_agpoffset) {
+ dev_priv->hws_agpoffset = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
return 0;
}
" G33 hw status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(0x02080, dev_priv->status_gfx_addr);
- DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
- DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
- return 0;
-}
-
-int i915_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long base, size;
- int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
-
- /* i915 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- memset(dev_priv, 0, sizeof(drm_i915_private_t));
-
- dev->dev_private = (void *)dev_priv;
- dev_priv->dev = dev;
-
- /* Add register map (needed for suspend/resume) */
- base = drm_get_resource_start(dev, mmio_bar);
- size = drm_get_resource_len(dev, mmio_bar);
-
- ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
- _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
-
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
- INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.request_list);
- INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
- i915_gem_retire_work_handler);
- dev_priv->mm.next_gem_seqno = 1;
-
-#ifdef __linux__
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
- intel_init_chipset_flush_compat(dev);
-#endif
-#endif
-
- /* Init HWS
- */
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = i915_init_hardware_status(dev);
- if(ret)
- return ret;
- }
-
- return ret;
-}
-
-int i915_driver_unload(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- i915_free_hardware_status(dev);
-
- drm_rmmap(dev, dev_priv->mmio_map);
-
- drm_free(dev->dev_private, sizeof(drm_i915_private_t),
- DRM_MEM_DRIVER);
-#ifdef __linux__
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
- intel_fini_chipset_flush_compat(dev);
-#endif
-#endif
- return 0;
-}
-
-void i915_driver_lastclose(struct drm_device * dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- /* agp off can use this to get called before dev_priv */
- if (!dev_priv)
- return;
-
-#ifdef I915_HAVE_BUFFER
- if (dev_priv->val_bufs) {
- vfree(dev_priv->val_bufs);
- dev_priv->val_bufs = NULL;
- }
-#endif
- i915_gem_lastclose(dev);
-
- if (drm_getsarea(dev) && dev_priv->sarea_priv)
- i915_do_cleanup_pageflip(dev);
- if (dev_priv->agp_heap)
- i915_mem_takedown(&(dev_priv->agp_heap));
-#if defined(I915_HAVE_BUFFER)
- if (dev_priv->sarea_kmap.virtual) {
- drm_bo_kunmap(&dev_priv->sarea_kmap);
- dev_priv->sarea_kmap.virtual = NULL;
- dev->lock.hw_lock = NULL;
- dev->sigdata.lock = NULL;
- }
-
- if (dev_priv->sarea_bo) {
- mutex_lock(&dev->struct_mutex);
- drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
- mutex_unlock(&dev->struct_mutex);
- dev_priv->sarea_bo = NULL;
- }
-#endif
- i915_dma_cleanup(dev);
-}
+ dev_priv->hws_vaddr = dev_priv->hws_map.handle;
-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_i915_file_private *i915_file_priv;
-
- DRM_DEBUG("\n");
- i915_file_priv = (struct drm_i915_file_private *)
- drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
-
- if (!i915_file_priv)
- return -ENOMEM;
-
- file_priv->driver_priv = i915_file_priv;
-
- i915_file_priv->mm.last_gem_seqno = 0;
- i915_file_priv->mm.last_gem_throttle_seqno = 0;
+ memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
- DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
- dev_priv->hws_agpoffset);
+ DRM_DEBUG("load hws at %p\n", dev_priv->hws_vaddr);
+
return 0;
}
void *agp_iomap;
unsigned int max_validate_buffers;
struct mutex cmdbuf_mutex;
- size_t stolen_base;
++ u32 stolen_base;
struct drm_i915_validate_buffer *val_bufs;
#endif
DRM_SPINTYPE swaps_lock;
- drm_i915_vbl_swap_t vbl_swaps;
+ struct drm_i915_vbl_swap vbl_swaps;
unsigned int swaps_pending;
-#if defined(I915_HAVE_BUFFER)
+
+ /* LVDS info */
+ int backlight_duty_cycle; /* restore backlight to this value */
+ bool panel_wants_dither;
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *vbt_mode; /* if any */
+
+#if defined(I915_HAVE_BUFFER) && defined(DRI2)
/* DRI2 sarea */
- struct drm_buffer_object *sarea_bo;
- struct drm_bo_kmap_obj sarea_kmap;
+ struct drm_gem_object *sarea_object;
+ struct drm_bo_kmap_obj sarea_kmap;
#endif
- struct timer_list retire_timer;
- struct work_struct retire_task;
+
+ /* Feature bits from the VBIOS */
+ int int_tv_support:1;
+ int lvds_dither:1;
+ int lvds_vbt:1;
+ int int_crt_support:1;
+
+ struct {
+ struct drm_memrange gtt_space;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
++ struct delayed_work retire_work;
+
+ uint32_t next_gem_seqno;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+ } mm;
+
+ struct work_struct user_interrupt_task;
+
/* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
u8 saveDACMASK;
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
-
- struct {
- struct drm_memrange gtt_space;
-
- /**
- * List of objects currently involved in rendering from the
- * ringbuffer.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head flushing_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- uint32_t next_gem_seqno;
-
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
- } mm;
-} drm_i915_private_t;
+};
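
[Editor's note: the mm.retire_work comment above describes the intended behaviour (retire finished requests periodically while the ring is busy, since the user IRQ is left off). A minimal sketch of a handler that does that and rearms itself, consistent with the i915_gem_retire_work_handler hunk earlier in this diff and assuming the standard delayed-work helpers; the real body lives in i915_gem.c and is not shown here in full.]

void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, mm.retire_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	/* Keep firing while requests remain outstanding. */
	if (!list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
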
+ struct drm_i915_file_private {
+ struct {
+ uint32_t last_gem_seqno;
+ uint32_t last_gem_throttle_seqno;
+ } mm;
+ };
+
enum intel_chip_family {
CHIP_I8XX = 0x01,
CHIP_I9XX = 0x02,
/* i915_dma.c */
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
-extern int i915_driver_unload(struct drm_device *);
+extern int i915_driver_unload(struct drm_device *dev);
extern void i915_driver_lastclose(struct drm_device * dev);
+ extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
+ extern void i915_driver_postclose(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
-extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
+extern void i915_user_irq_on(struct drm_device *dev);
+extern void i915_user_irq_off(struct drm_device *dev);
- extern void i915_user_interrupt_handler(struct work_struct *work);
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
- int i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain);
- int i915_gem_flush_pwrite(struct drm_gem_object *obj,
- uint64_t offset, uint64_t size);
void i915_gem_lastclose(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
- void i915_gem_retire_timeout(unsigned long data);
- void i915_gem_retire_handler(struct work_struct *work);
+int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end);
+ void i915_gem_retire_work_handler(struct work_struct *work);
#endif
+extern unsigned int i915_fbpercrtc;
+
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
extern void intel_init_chipset_flush_compat(struct drm_device *dev);
--- /dev/null
- if (IS_I9XX(dev)) {
+/*
+ * Copyright (c) 2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
+ * 2004 Sylvain Meyer
+ *
+ * GPL/BSD dual license
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_bios.h"
+#include "intel_drv.h"
+
+/**
+ * i915_probe_agp - get AGP bootup configuration
+ * @pdev: PCI device
+ * @aperture_size: returns AGP aperture configured size
+ * @preallocated_size: returns size of BIOS preallocated AGP space
+ *
+ * Since Intel integrated graphics are UMA, the BIOS has to set aside
+ * some RAM for the framebuffer at early boot. This code figures out
+ * how much was set aside so we can use it for our own purposes.
+ */
+int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
+ unsigned long *preallocated_size)
+{
+ struct pci_dev *bridge_dev;
+ u16 tmp = 0;
+ unsigned long overhead;
+
+ bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ if (!bridge_dev) {
+ DRM_ERROR("bridge device not found\n");
+ return -1;
+ }
+
+ /* Get the fb aperture size and "stolen" memory amount. */
+ pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
+ pci_dev_put(bridge_dev);
+
+ *aperture_size = 1024 * 1024;
+ *preallocated_size = 1024 * 1024;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_82830_CGC:
+ case PCI_DEVICE_ID_INTEL_82845G_IG:
+ case PCI_DEVICE_ID_INTEL_82855GM_IG:
+ case PCI_DEVICE_ID_INTEL_82865_IG:
+ if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
+ *aperture_size *= 64;
+ else
+ *aperture_size *= 128;
+ break;
+ default:
+ /* 9xx supports large sizes, just look at the length */
+ *aperture_size = pci_resource_len(pdev, 2);
+ break;
+ }
+
+ /*
+ * Some of the preallocated space is taken by the GTT
+ * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
+ */
+ overhead = (*aperture_size / 1024) + 4096;
+ switch (tmp & INTEL_855_GMCH_GMS_MASK) {
+ case INTEL_855_GMCH_GMS_STOLEN_1M:
+ break; /* 1M already */
+ case INTEL_855_GMCH_GMS_STOLEN_4M:
+ *preallocated_size *= 4;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_8M:
+ *preallocated_size *= 8;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_16M:
+ *preallocated_size *= 16;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_32M:
+ *preallocated_size *= 32;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_48M:
+ *preallocated_size *= 48;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_64M:
+ *preallocated_size *= 64;
+ break;
+ case INTEL_855_GMCH_GMS_DISABLED:
+ DRM_ERROR("video memory is disabled\n");
+ return -1;
+ default:
+ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+ tmp & INTEL_855_GMCH_GMS_MASK);
+ return -1;
+ }
+ *preallocated_size -= overhead;
+
+ return 0;
+}
+
+static int i915_init_hwstatus(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_memrange_node *free_space;
+ int ret = 0;
+
+ /* Program Hardware Status Page */
+ if (!IS_G33(dev)) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+ if (!dev_priv->status_page_dmah) {
+ DRM_ERROR("Can not allocate hardware status page\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ } else {
+ free_space = drm_memrange_search_free(&dev_priv->vram,
+ PAGE_SIZE,
+ PAGE_SIZE, 0);
+ if (!free_space) {
+ DRM_ERROR("No free vram available, aborting\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_priv->hws = drm_memrange_get_block(free_space, PAGE_SIZE,
+ PAGE_SIZE);
+ if (!dev_priv->hws) {
+ DRM_ERROR("Unable to allocate or pin hw status page\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev_priv->hws_agpoffset = dev_priv->hws->start;
+ dev_priv->hws_map.offset = dev->agp->base +
+ dev_priv->hws->start;
+ dev_priv->hws_map.size = PAGE_SIZE;
+ dev_priv->hws_map.type= 0;
+ dev_priv->hws_map.flags= 0;
+ dev_priv->hws_map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_map.handle == NULL) {
+ dev_priv->hws_agpoffset = 0;
+ DRM_ERROR("can not ioremap virtual addr for"
+ "G33 hw status page\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ dev_priv->hws_vaddr = dev_priv->hws_map.handle;
+ I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
+ }
+
+ memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
+
+ DRM_DEBUG("Enabled hardware status page\n");
+
+ return 0;
+
+out_free:
+ /* free hws */
+out:
+ return ret;
+}
+
+static void i915_cleanup_hwstatus(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_G33(dev)) {
+ if (dev_priv->status_page_dmah)
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ } else {
+ if (dev_priv->hws_map.handle)
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ if (dev_priv->hws)
+ drm_memrange_put_block(dev_priv->hws);
+ }
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long agp_size, prealloc_size;
+ int ret = 0;
+
+ i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
+
+ /* Basic memrange allocator for stolen space (aka vram) */
+ drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
+ /* Let GEM Manage from end of prealloc space to end of aperture */
+ i915_gem_do_init(dev, prealloc_size, agp_size);
+
+ ret = i915_gem_init_ringbuffer(dev);
+ if (ret)
+ goto out;
+
+ ret = i915_init_hwstatus(dev);
+ if (ret)
+ goto destroy_ringbuffer;
+
+ /* Allow hardware batchbuffers unless told otherwise.
+ */
+ dev_priv->allow_batchbuffer = 1;
+ dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
+ mutex_init(&dev_priv->cmdbuf_mutex);
+
+ dev_priv->wq = create_singlethread_workqueue("i915");
+ if (dev_priv->wq == 0) {
+ DRM_DEBUG("Error\n");
+ ret = -EINVAL;
+ goto destroy_hws;
+ }
+
+ ret = intel_init_bios(dev);
+ if (ret) {
+ DRM_ERROR("failed to find VBIOS tables\n");
+ ret = -ENODEV;
+ goto destroy_wq;
+ }
+
+ intel_modeset_init(dev);
+ drm_helper_initial_config(dev, false);
+
+ dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
+ if (!dev->devname) {
+ ret = -ENOMEM;
+ goto modeset_cleanup;
+ }
+
+ ret = drm_irq_install(dev);
+ if (ret) {
+ kfree(dev->devname);
+ goto modeset_cleanup;
+ }
+ return 0;
+
+modeset_cleanup:
+ intel_modeset_cleanup(dev);
+destroy_wq:
+ destroy_workqueue(dev_priv->wq);
+destroy_hws:
+ i915_cleanup_hwstatus(dev);
+destroy_ringbuffer:
+ i915_gem_cleanup_ringbuffer(dev);
+out:
+ return ret;
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_i915_private *dev_priv;
+ int ret = 0;
+
+ dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ memset(dev_priv, 0, sizeof(struct drm_i915_private));
+ dev->dev_private = (void *)dev_priv;
+ dev_priv->dev = dev;
+
+ /* i915 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+ dev->types[9] = _DRM_STAT_DMA;
+
+ if (IS_MOBILE(dev) || IS_I9XX(dev))
+ dev_priv->cursor_needs_physical = true;
+ else
+ dev_priv->cursor_needs_physical = false;
+
+ if (IS_I965G(dev) || IS_G33(dev))
+ dev_priv->cursor_needs_physical = false;
+
- DRM_DEBUG("stolen base %p\n", (void*)dev_priv->stolen_base);
- }
++ if (IS_I9XX(dev))
+ pci_read_config_dword(dev->pdev, 0x5C, &dev_priv->stolen_base);
- dev_priv->mm.retire_timer.function = i915_gem_retire_timeout;
- dev_priv->mm.retire_timer.data = (unsigned long) dev;
- init_timer_deferrable (&dev_priv->mm.retire_timer);
- INIT_WORK(&dev_priv->mm.retire_task,
- i915_gem_retire_handler);
- INIT_WORK(&dev_priv->user_interrupt_task,
- i915_user_interrupt_handler);
+
+ if (IS_I9XX(dev)) {
+ dev_priv->mmiobase = drm_get_resource_start(dev, 0);
+ dev_priv->mmiolen = drm_get_resource_len(dev, 0);
+ dev->mode_config.fb_base =
+ drm_get_resource_start(dev, 2) & 0xff000000;
+ } else if (drm_get_resource_start(dev, 1)) {
+ dev_priv->mmiobase = drm_get_resource_start(dev, 1);
+ dev_priv->mmiolen = drm_get_resource_len(dev, 1);
+ dev->mode_config.fb_base =
+ drm_get_resource_start(dev, 0) & 0xff000000;
+ } else {
+ DRM_ERROR("Unable to find MMIO registers\n");
+ ret = -ENODEV;
+ goto free_priv;
+ }
+
+ DRM_DEBUG("fb_base: 0x%08lx\n", dev->mode_config.fb_base);
+
+ ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
+ _DRM_REGISTERS, _DRM_KERNEL|_DRM_READ_ONLY|_DRM_DRIVER,
+ &dev_priv->mmio_map);
+ if (ret != 0) {
+ DRM_ERROR("Cannot add mapping for MMIO registers\n");
+ goto free_priv;
+ }
+
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
++ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
++ i915_gem_retire_work_handler);
+ dev_priv->mm.next_gem_seqno = 1;
+
+#ifdef __linux__
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+ intel_init_chipset_flush_compat(dev);
+#endif
+#endif
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_load_modeset_init(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to init modeset\n");
+ goto out_rmmap;
+ }
+ }
+ return 0;
+
+out_rmmap:
+ drm_rmmap(dev, dev_priv->mmio_map);
+free_priv:
+ drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
+ return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(PRB0_CTL, 0);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_irq_uninstall(dev);
+ intel_modeset_cleanup(dev);
+ destroy_workqueue(dev_priv->wq);
+ }
+
+#if 0
+ if (dev_priv->ring.virtual_start) {
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+ }
+#endif
+
+#ifdef DRI2
+ if (dev_priv->sarea_kmap.virtual) {
+ drm_bo_kunmap(&dev_priv->sarea_kmap);
+ dev_priv->sarea_kmap.virtual = NULL;
+ dev->sigdata.lock = NULL;
+ }
+
+ if (dev_priv->sarea_bo) {
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
+ mutex_unlock(&dev->struct_mutex);
+ dev_priv->sarea_bo = NULL;
+ }
+#endif
+ i915_cleanup_hwstatus(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
+ drm_memrange_takedown(&dev_priv->vram);
+ i915_gem_lastclose(dev);
+ }
+
+#ifdef __linux__
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+ intel_fini_chipset_flush_compat(dev);
+#endif
+#endif
+
+ DRM_DEBUG("%p\n", dev_priv->mmio_map);
+ drm_rmmap(dev, dev_priv->mmio_map);
+
+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int i915_master_create(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_i915_master_private *master_priv;
+ unsigned long sareapage;
+ int ret;
+
+ master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+ if (!master_priv)
+ return -ENOMEM;
+
+ /* prebuild the SAREA */
+ sareapage = max(SAREA_MAX, PAGE_SIZE);
+ ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+ &master_priv->sarea);
+ if (ret) {
+ DRM_ERROR("SAREA setup failed\n");
+ return ret;
+ }
+ master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+ master_priv->sarea_priv->pf_current_page = 0;
+
+ master->driver_priv = master_priv;
+ return 0;
+}
+
+void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_i915_master_private *master_priv = master->driver_priv;
+
+ if (!master_priv)
+ return;
+
+ if (master_priv->sarea)
+ drm_rmmap(dev, master_priv->sarea);
+
+ drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+
+ master->driver_priv = NULL;
+}
+
++int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++{
++ struct drm_i915_file_private *i915_file_priv;
++
++ DRM_DEBUG("\n");
++ i915_file_priv = (struct drm_i915_file_private *)
++ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
++
++ if (!i915_file_priv)
++ return -ENOMEM;
++
++ file_priv->driver_priv = i915_file_priv;
++
++ i915_file_priv->mm.last_gem_seqno = 0;
++ i915_file_priv->mm.last_gem_throttle_seqno = 0;
++
++ return 0;
++}
++
++void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
++{
++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++
++ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
++}
++
+void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+}
+
+void i915_driver_lastclose(struct drm_device * dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+#ifdef I915_HAVE_BUFFER
+ if (dev_priv->val_bufs) {
+ vfree(dev_priv->val_bufs);
+ dev_priv->val_bufs = NULL;
+ }
+#endif
+
+ i915_gem_lastclose(dev);
+
+ if (dev_priv->agp_heap)
+ i915_mem_takedown(&(dev_priv->agp_heap));
+
+#if defined(DRI2)
+ if (dev_priv->sarea_kmap.virtual) {
+ drm_bo_kunmap(&dev_priv->sarea_kmap);
+ dev_priv->sarea_kmap.virtual = NULL;
+ dev->control->master->lock.hw_lock = NULL;
+ dev->sigdata.lock = NULL;
+ }
+
+ if (dev_priv->sarea_bo) {
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
+ mutex_unlock(&dev->struct_mutex);
+ dev_priv->sarea_bo = NULL;
+ }
+#endif
+
+ i915_dma_cleanup(dev);
+}
+
+int i915_driver_firstopen(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
+ drm_bo_driver_init(dev);
+#endif
+ return 0;
+}
return count;
}
- void
- i915_user_interrupt_handler(struct work_struct *work)
- {
- struct drm_i915_private *dev_priv;
- struct drm_device *dev;
-
- dev_priv = container_of(work, struct drm_i915_private,
- user_interrupt_task);
- dev = dev_priv->dev;
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev);
- mutex_unlock(&dev->struct_mutex);
- }
-
+static struct drm_device *hotplug_dev;
+
+/**
+ * Handler for user interrupts in process context (able to sleep, do VFS
+ * operations, etc.)
+ *
+ * If another IRQ comes in while we're in this handler, it will still get put
+ * on the queue again to be rerun when we finish.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static void i915_hotplug_work_func(void *work)
+#else
+static void i915_hotplug_work_func(struct work_struct *work)
+#endif
+{
+ struct drm_device *dev = hotplug_dev;
+
+ drm_helper_hotplug_stage_two(dev);
+ drm_handle_hotplug(dev);
+}
+
+static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+ static DECLARE_WORK(hotplug, i915_hotplug_work_func, NULL);
+#else
+ static DECLARE_WORK(hotplug, i915_hotplug_work_func);
+#endif
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ hotplug_dev = dev;
+
+ if (stat & TV_HOTPLUG_INT_STATUS) {
+ DRM_DEBUG("TV event\n");
+ }
+
+ if (stat & CRT_HOTPLUG_INT_STATUS) {
+ DRM_DEBUG("CRT event\n");
+ }
+
+ if (stat & SDVOB_HOTPLUG_INT_STATUS) {
+ DRM_DEBUG("sDVOB event\n");
+ }
+
+ if (stat & SDVOC_HOTPLUG_INT_STATUS) {
+ DRM_DEBUG("sDVOC event\n");
+ }
+ queue_work(dev_priv->wq, &hotplug);
+
+ return 0;
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
u32 iir;
- u32 pipea_stats = 0, pipeb_stats, tvdac;
- u32 pipea_stats = 0, pipeb_stats = 0;
++ u32 pipea_stats = 0, pipeb_stats = 0, tvdac;
+ int hotplug = 0;
int vblank = 0;
- iir &= (dev_priv->irq_mask_reg | I915_USER_INTERRUPT);
+ /* On i8xx/i915 hw the IIR and IER are 16bit on i9xx its 32bit */
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
+ iir = I915_READ(IIR);
+ else
+ iir = I915_READ16(IIR);
+
- I915_WRITE(I915REG_INT_ENABLE_R, 0);
- iir = I915_READ(I915REG_INT_IDENTITY_R);
-#if 0
- DRM_DEBUG("flag=%08x\n", iir);
-#endif
- if (iir == 0) {
- DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
- iir,
- I915_READ(I915REG_INT_MASK_R),
- I915_READ(I915REG_INT_ENABLE_R),
- I915_READ(I915REG_PIPEASTAT),
- I915_READ(I915REG_PIPEBSTAT));
- if (dev->pdev->msi_enabled)
- I915_WRITE(I915REG_INT_ENABLE_R,
- I915_INTERRUPT_ENABLE_MASK);
+ if (dev->pdev->msi_enabled)
- #if 0
- DRM_DEBUG("flag=%08x\n", iir);
- #endif
- if (iir == 0) {
- #if 0
- DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
- iir,
- I915_READ(IMR),
- I915_READ(IER),
- I915_READ(PIPEASTAT),
- I915_READ(PIPEBSTAT));
- #endif
++ I915_WRITE(IER, 0);
+
++ if (!iir)
return IRQ_NONE;
-- }
/*
* Clear the PIPE(A|B)STAT regs before the IIR otherwise
drm_locked_tasklet(dev, i915_vblank_tasklet);
}
- I915_WRITE(I915REG_INT_ENABLE_R, I915_INTERRUPT_ENABLE_MASK);
+ if ((iir & I915_DISPLAY_PORT_INTERRUPT) || hotplug) {
+ u32 temp2 = 0;
+
+ DRM_INFO("Hotplug event received\n");
+
+ if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev)) {
+ if (pipea_stats & PIPE_HOTPLUG_INTERRUPT_STATUS)
+ temp2 |= SDVOB_HOTPLUG_INT_STATUS |
+ SDVOC_HOTPLUG_INT_STATUS;
+ if (pipea_stats & PIPE_HOTPLUG_TV_INTERRUPT_STATUS)
+ temp2 |= TV_HOTPLUG_INT_STATUS;
+ } else {
+ temp2 = I915_READ(PORT_HOTPLUG_STAT);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, temp2);
+ }
+ i915_run_hotplug_tasklet(dev, temp2);
+ }
+
+ if (dev->pdev->msi_enabled)
++ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
++
return IRQ_HANDLED;
}
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
- if (READ_BREADCRUMB(dev_priv) >= irq_nr)
++ master_priv = dev->primary->master->driver_priv;
++
++ if (!master_priv) {
++ DRM_ERROR("no master priv?\n");
++ return -EINVAL;
++ }
++
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
++ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return 0;
+ }
- i915_user_irq_on(dev_priv);
+ i915_user_irq_on(dev);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
- i915_user_irq_off(dev_priv);
+ i915_user_irq_off(dev);
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
+
- }
++ if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
return ret;
}