void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
+ /* Master routines */
+ int (*master_create)(struct drm_device *dev, struct drm_master *master);
+ void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
++
+ int (*proc_init)(struct drm_minor *minor);
+ void (*proc_cleanup)(struct drm_minor *minor);
+
/**
* Driver-specific constructor for drm_gem_objects, to set up
* obj->driver_private.
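/*
 * Illustrative sketch (editor's addition, not part of this patch): the new
 * proc_init/proc_cleanup driver hooks above are presumably invoked from the
 * core's per-minor /proc setup and teardown, roughly as follows.  The helper
 * names and exact call sites are assumptions.
 */
static int drm_minor_proc_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;

	if (dev->driver->proc_init)
		return dev->driver->proc_init(minor);
	return 0;
}

static void drm_minor_proc_cleanup(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;

	if (dev->driver->proc_cleanup)
		dev->driver->proc_cleanup(minor);
}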
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
extern int drm_put_dev(struct drm_device *dev);
- extern int drm_put_minor(struct drm_minor **minor);
-extern int drm_put_minor(struct drm_device *dev);
++extern int drm_put_minor(struct drm_device *dev, struct drm_minor **p);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern struct class *drm_class;
drm_memrange_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
- drm_put_minor(&dev->primary);
- drm_put_minor(dev);
++ drm_put_minor(dev, &dev->primary);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
++ drm_put_minor(dev, &dev->control);
++
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
}
driver->date, dev->primary->index);
return 0;
- drm_put_minor(&dev->primary);
+err_g5:
++ drm_put_minor(dev, &dev->primary);
err_g4:
- drm_put_minor(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
++ drm_put_minor(dev, &dev->control);
err_g3:
if (!drm_fb_loaded)
pci_disable_device(pdev);
* last minor released.
*
*/
- int drm_put_minor(struct drm_minor **minor_p)
-int drm_put_minor(struct drm_device *dev)
++int drm_put_minor(struct drm_device *dev, struct drm_minor **minor_p)
{
- struct drm_minor **minor_p = &dev->primary;
struct drm_minor *minor = *minor_p;
DRM_DEBUG("release secondary minor %d\n", minor->index);
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
+ .master_create = i915_master_create,
+ .master_destroy = i915_master_destroy,
+ .proc_init = i915_gem_proc_init,
+ .proc_cleanup = i915_gem_proc_cleanup,
.ioctls = i915_ioctls,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
uint32_t read_domains,
uint32_t write_domain);
-int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ static void
+ i915_gem_clflush_object(struct drm_gem_object *obj);
+
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_init *args = data;
-
- mutex_lock(&dev->struct_mutex);
+ struct drm_i915_private *dev_priv = dev->dev_private;
- if (args->gtt_start >= args->gtt_end ||
- (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
- (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
- mutex_unlock(&dev->struct_mutex);
+ if (start >= end ||
+ (start & (PAGE_SIZE - 1)) != 0 ||
+ (end & (PAGE_SIZE - 1)) != 0) {
return -EINVAL;
}
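/*
 * Illustrative sketch (editor's addition, not part of this patch): with the
 * range checking moved into i915_gem_do_init(), the GEM init ioctl presumably
 * becomes a thin locked wrapper along these lines; the exact locking
 * placement is assumed.
 */
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}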
return (int32_t)(seq1 - seq2) >= 0;
}
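/*
 * Editor's note: the signed-difference test in i915_seqno_passed() above is
 * wrap-safe.  For example, with seq1 = 0x00000002 and seq2 = 0xfffffffe,
 * (int32_t)(seq1 - seq2) == 4 >= 0, so seq1 is still treated as having
 * passed seq2 across the 32-bit wraparound (valid while the two values stay
 * within 2^31 of each other).
 */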
- static uint32_t
+ uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
BUG_ON(seqno == 0);
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- i915_user_irq_on(dev_priv);
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ i915_user_irq_on(dev);
ret = wait_event_interruptible(dev_priv->irq_queue,
i915_seqno_passed(i915_get_gem_seqno(dev),
- seqno));
+ seqno) || dev_priv->mm.wedged);
- i915_user_irq_off(dev_priv);
+ i915_user_irq_off(dev);
+ dev_priv->mm.waiting_gem_seqno = 0;
}
+ if (dev_priv->mm.wedged)
+ ret = -EIO;
+
if (ret)
DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
__func__, ret, seqno, i915_get_gem_seqno(dev));
return 0;
}
- void
- i915_gem_lastclose(struct drm_device *dev)
+ /** Unbinds all objects that are on the given buffer list. */
+ static int
+ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+ {
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
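+ /* Note: the caller holds dev->struct_mutex.  The error paths below drop
+ * the mutex before returning, while the success path leaves it held for
+ * the caller to release.
+ */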
+ while (!list_empty(head)) {
+ obj_priv = list_first_entry(head,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ if (obj_priv->pin_count != 0) {
+ DRM_ERROR("Pinned object in unbind list\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0) {
+ DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
+ ret);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ return 0;
+ }
+
+ static int
+ i915_gem_idle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t seqno, cur_seqno, last_seqno;
+ int stuck;
- mutex_lock(&dev->struct_mutex);
+ if (dev_priv->mm.suspended)
+ return 0;
- /* Assume that the chip has been idled at this point. Just pull them
- * off the execution list and unref them. Since this is the last
- * close, this is also the last ref and they'll go away.
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
*/
+ dev_priv->mm.suspended = 1;
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ i915_kernel_lost_context(dev);
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list);
+ /* Flush the GPU along with all non-CPU write domains
+ */
+ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
+ ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- list_del_init(&obj_priv->list);
- obj_priv->active = 0;
- obj_priv->obj->write_domain = 0;
- drm_gem_object_unreference(obj_priv->obj);
+ if (seqno == 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOMEM;
}
- mutex_unlock(&dev->struct_mutex);
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ last_seqno = 0;
+ stuck = 0;
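+ /* Busy-wait for the flush request to retire.  If the reported seqno stops
+ * advancing for roughly a second (100 polls at 10 ms), declare the
+ * hardware wedged and wake any waiters so they can fail with -EIO.
+ */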
+ for (;;) {
+ cur_seqno = i915_get_gem_seqno(dev);
+ if (i915_seqno_passed(cur_seqno, seqno))
+ break;
+ if (last_seqno == cur_seqno) {
+ if (stuck++ > 100) {
+ DRM_ERROR("hardware wedged\n");
+ dev_priv->mm.wedged = 1;
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ break;
+ }
+ }
+ msleep(10);
+ last_seqno = cur_seqno;
+ }
+ dev_priv->mm.waiting_gem_seqno = 0;
+
+ i915_gem_retire_requests(dev);
+
+ /* The active and flushing lists should now be empty as we've
+ * waited for a sequence higher than any pending execbuffer
+ */
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+ /* The request list should now be empty as we've also waited
+ * for the last request in the list
+ */
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+ /* Move all buffers out of the GTT. */
+ i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ return 0;
}
-static int
+int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ if (dev_priv->mm.wedged) {
+ DRM_ERROR("Re-enabling wedged hardware, good luck\n");
+ dev_priv->mm.wedged = 0;
+ }
+
ret = i915_gem_init_ringbuffer(dev);
if (ret != 0)
return ret;
return 0;
}
- /** Unbinds all objects that are on the given buffer list. */
- static int
- i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+ int
+ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
int ret;
- while (!list_empty(head)) {
- obj_priv = list_first_entry(head,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_idle(dev);
+ if (ret == 0)
+ i915_gem_cleanup_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Pinned object in unbind list\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
+ return 0;
+ }
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
- ret);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ static int i915_gem_active_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+ {
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Active:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n",
+ obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
}
}
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+ }
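/*
 * Editor's note: these /proc handlers follow the legacy read_proc
 * convention: the whole report is formatted into the page buffer on every
 * call, *start is pointed at the requested offset, *eof is set once all
 * data has been emitted, and the return value is the number of bytes
 * available past the offset, capped at the caller's request size.
 */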
- drm_i915_private_t *dev_priv = dev->dev_private;
+ static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+ {
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
- return 0;
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Flushing:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
}
- int
- i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
- uint32_t seqno;
- int ret;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
- mutex_lock(&dev->struct_mutex);
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- */
- dev_priv->mm.suspended = 1;
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
- i915_kernel_lost_context(dev);
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Inactive:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+ }
- /* Flush the GPU along with all non-CPU write domains
- */
- i915_gem_flush(dev, ~I915_GEM_DOMAIN_CPU, ~I915_GEM_DOMAIN_CPU);
- seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
- if (seqno == 0) {
- mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
+ static int i915_gem_request_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+ {
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *gem_request;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
}
- ret = i915_wait_request(dev, seqno);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Request:\n");
+ list_for_each_entry(gem_request, &dev_priv->mm.request_list,
+ list)
+ {
+ DRM_PROC_PRINT (" %d @ %d %08x\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies),
+ gem_request->flush_domains);
}
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+ }
- /* Active and flushing should now be empty as we've
- * waited for a sequence higher than any pending execbuffer
- */
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+ {
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ int len = 0;
- /* Request should now be empty as we've also waited
- * for the last request in the list
- */
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
- /* Move all buffers out of the GTT. */
- i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+ DRM_PROC_PRINT("Waiter sequence: %d\n", dev_priv->mm.waiting_gem_seqno);
+ DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+ }
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
- i915_gem_cleanup_ringbuffer(dev);
+ static int i915_interrupt_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+ {
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
- mutex_unlock(&dev->struct_mutex);
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Interrupt enable: %08x\n",
- I915_READ(I915REG_INT_ENABLE_R));
++ I915_READ(IER));
+ DRM_PROC_PRINT("Interrupt identity: %08x\n",
- I915_READ(I915REG_INT_IDENTITY_R));
++ I915_READ(IIR));
+ DRM_PROC_PRINT("Interrupt mask: %08x\n",
- I915_READ(I915REG_INT_MASK_R));
++ I915_READ(IMR));
+ DRM_PROC_PRINT("Pipe A stat: %08x\n",
- I915_READ(I915REG_PIPEASTAT));
++ I915_READ(PIPEASTAT));
+ DRM_PROC_PRINT("Pipe B stat: %08x\n",
- I915_READ(I915REG_PIPEBSTAT));
++ I915_READ(PIPEBSTAT));
+ DRM_PROC_PRINT("Interrupts received: %d\n",
+ atomic_read(&dev_priv->irq_received));
+ DRM_PROC_PRINT("Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ DRM_PROC_PRINT("Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ DRM_PROC_PRINT("IRQ sequence: %d\n",
+ dev_priv->mm.irq_gem_seqno);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+ }
+ static struct drm_proc_list {
+ const char *name; /**< file name */
+ int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback */
+ } i915_gem_proc_list[] = {
+ {"i915_gem_active", i915_gem_active_info},
+ {"i915_gem_flushing", i915_gem_flushing_info},
+ {"i915_gem_inactive", i915_gem_inactive_info},
+ {"i915_gem_request", i915_gem_request_info},
+ {"i915_gem_seqno", i915_gem_seqno_info},
+ {"i915_gem_interrupt", i915_interrupt_info},
+ };
+
+ #define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
+
+ int i915_gem_proc_init(struct drm_minor *minor)
+ {
+ struct proc_dir_entry *ent;
+ int i, j;
+
+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
+ ent = create_proc_entry(i915_gem_proc_list[i].name,
+ S_IFREG | S_IRUGO, minor->dev_root);
+ if (!ent) {
+ DRM_ERROR("Cannot create /proc/dri/.../%s\n",
+ i915_gem_proc_list[i].name);
+ for (j = 0; j < i; j++)
+ remove_proc_entry(i915_gem_proc_list[j].name,
+ minor->dev_root);
+ return -1;
+ }
+ ent->read_proc = i915_gem_proc_list[i].f;
+ ent->data = minor;
+ }
return 0;
}
- drm_i915_private_t *dev_priv = dev->dev_private;
+
+ void i915_gem_proc_cleanup(struct drm_minor *minor)
+ {
+ int i;
+
+ if (!minor->dev_root)
+ return;
+
+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
+ remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
+ }
+
+ void
+ i915_gem_lastclose(struct drm_device *dev)
+ {
+ int ret;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (dev_priv->ring.ring_obj != NULL) {
+ ret = i915_gem_idle(dev);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+
+ i915_gem_cleanup_ringbuffer(dev);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ }
#endif
DRM_SPINTYPE swaps_lock;
- drm_i915_vbl_swap_t vbl_swaps;
+ struct drm_i915_vbl_swap vbl_swaps;
unsigned int swaps_pending;
-#if defined(I915_HAVE_BUFFER)
+
+ /* LVDS info */
+ int backlight_duty_cycle; /* restore backlight to this value */
+ bool panel_wants_dither;
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *vbt_mode; /* if any */
+
+#if defined(I915_HAVE_BUFFER) && defined(DRI2)
/* DRI2 sarea */
- struct drm_buffer_object *sarea_bo;
- struct drm_bo_kmap_obj sarea_kmap;
+ struct drm_gem_object *sarea_object;
+ struct drm_bo_kmap_obj sarea_kmap;
#endif
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
+
+ /* Feature bits from the VBIOS */
+ int int_tv_support:1;
+ int lvds_dither:1;
+ int lvds_vbt:1;
+ int int_crt_support:1;
+
+ struct {
+ struct drm_memrange gtt_space;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ uint32_t next_gem_seqno;
+
++ /**
++ * Waiting sequence number, if any
++ */
++ uint32_t waiting_gem_seqno;
++
++ /**
++ * Last seq seen at irq time
++ */
++ uint32_t irq_gem_seqno;
++
++ /**
++ * Flag if the X Server, and thus DRM, is not currently in
++ * control of the device.
++ *
++ * This is set between LeaveVT and EnterVT. It needs to be
++ * replaced with a semaphore. It also needs to be
++ * transitioned away from for kernel modesetting.
++ */
++ int suspended;
++
++ /**
++ * Flag if the hardware appears to be wedged.
++ *
++ * This is set when attempts to idle the device time out.
++ * It prevents command submission from occurring and makes
++ * every pending request fail.
++ */
++ int wedged;
+ } mm;
+
+ struct work_struct user_interrupt_task;
+
/* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+ uint32_t i915_get_gem_seqno(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
+int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end);
void i915_gem_retire_work_handler(struct work_struct *work);
#endif
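/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * retire_work declared in dev_priv->mm is presumably a delayed work item
 * that periodically retires completed requests while the ring is busy,
 * roughly as below.  The dev back-pointer and the HZ re-arm interval are
 * assumptions.
 */
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, struct drm_i915_private,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}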
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
-i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+ static inline void
- I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
- (void) I915_READ(I915REG_INT_MASK_R);
++i915_enable_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+ {
+ if ((dev_priv->irq_mask_reg & mask) != 0) {
+ dev_priv->irq_mask_reg &= ~mask;
-i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ (void) I915_READ(IMR);
+ }
+ }
+
+ static inline void
- I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
- (void) I915_READ(I915REG_INT_MASK_R);
++i915_disable_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+ {
+ if ((dev_priv->irq_mask_reg & mask) != mask) {
+ dev_priv->irq_mask_reg |= mask;
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ (void) I915_READ(IMR);
+ }
+ }
+
/**
* i915_get_pipe - return the pipe associated with a given plane
* @dev: DRM device
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
u32 iir;
- u32 pipea_stats = 0, pipeb_stats = 0;
+ u32 pipea_stats = 0, pipeb_stats = 0, tvdac;
+ int hotplug = 0;
int vblank = 0;
- /* On i8xx/i915 hw the IIR and IER are 16bit on i9xx its 32bit */
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- iir = I915_READ(IIR);
- else
- iir = I915_READ16(IIR);
-
if (dev->pdev->msi_enabled)
- I915_WRITE(IER, 0);
-
- if (!iir)
- I915_WRITE(I915REG_INT_MASK_R, ~0);
- iir = I915_READ(I915REG_INT_IDENTITY_R);
++ I915_WRITE(IMR, ~0);
++ iir = I915_READ(IIR);
+ #if 0
+ DRM_DEBUG("flag=%08x\n", iir);
+ #endif
+ atomic_inc(&dev_priv->irq_received);
+ if (iir == 0) {
+ DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
+ iir,
- I915_READ(I915REG_INT_MASK_R),
- I915_READ(I915REG_INT_ENABLE_R),
- I915_READ(I915REG_PIPEASTAT),
- I915_READ(I915REG_PIPEBSTAT));
++ I915_READ(IMR),
++ I915_READ(IER),
++ I915_READ(PIPEASTAT),
++ I915_READ(PIPEBSTAT));
+ if (dev->pdev->msi_enabled) {
- I915_WRITE(I915REG_INT_MASK_R,
++ I915_WRITE(IMR,
+ dev_priv->irq_mask_reg);
- (void) I915_READ(I915REG_INT_MASK_R);
++ (void) I915_READ(IMR);
+ }
return IRQ_NONE;
+ }
/*
* Clear the PIPE(A|B)STAT regs before the IIR otherwise
* we may get extra interrupts.
*/
if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
- pipea_stats = I915_READ(I915REG_PIPEASTAT);
- I915_WRITE(I915REG_PIPEASTAT, pipea_stats);
+ pipea_stats = I915_READ(PIPEASTAT);
- if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
- PIPE_VBLANK_INTERRUPT_STATUS))
- {
- vblank++;
- drm_handle_vblank(dev, i915_get_plane(dev, 0));
- }
-
- /* This is a global event, and not a pipe A event */
- if (pipea_stats & PIPE_HOTPLUG_INTERRUPT_STATUS)
- hotplug = 1;
-
- if (pipea_stats & PIPE_HOTPLUG_TV_INTERRUPT_STATUS) {
- hotplug = 1;
- /* Toggle hotplug detection to clear hotplug status */
- tvdac = I915_READ(TV_DAC);
- I915_WRITE(TV_DAC, tvdac & ~TVDAC_STATE_CHG_EN);
- I915_WRITE(TV_DAC, tvdac | TVDAC_STATE_CHG_EN);
- }
-
+ I915_WRITE(PIPEASTAT, pipea_stats);
}
+
if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
- pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
- I915_WRITE(I915REG_PIPEBSTAT, pipeb_stats);
+ pipeb_stats = I915_READ(PIPEBSTAT);
- if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
- PIPE_VBLANK_INTERRUPT_STATUS))
- {
- vblank++;
- drm_handle_vblank(dev, i915_get_plane(dev, 1));
- }
+ I915_WRITE(PIPEBSTAT, pipeb_stats);
}
- /* Clear the generated interrupt */
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
- I915_WRITE(IIR, iir);
- (void) I915_READ(IIR);
- } else {
- I915_WRITE16(IIR, iir);
- (void) I915_READ16(IIR);
- I915_WRITE(I915REG_INT_IDENTITY_R, iir);
++ I915_WRITE(IIR, iir);
+ if (dev->pdev->msi_enabled)
- I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
- (void) I915_READ(I915REG_INT_IDENTITY_R); /* Flush posted writes */
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ (void) I915_READ(IIR); /* Flush posted writes */
++
++ /* This is a global event, and not a pipe A event */
++ if (pipea_stats & PIPE_HOTPLUG_INTERRUPT_STATUS)
++ hotplug = 1;
++
++ if (pipea_stats & PIPE_HOTPLUG_TV_INTERRUPT_STATUS) {
++ hotplug = 1;
++ /* Toggle hotplug detection to clear hotplug status */
++ tvdac = I915_READ(TV_DAC);
++ I915_WRITE(TV_DAC, tvdac & ~TVDAC_STATE_CHG_EN);
++ I915_WRITE(TV_DAC, tvdac | TVDAC_STATE_CHG_EN);
+ }
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ }
if (iir & I915_USER_INTERRUPT) {
+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
i915_fence_handler(dev);
#endif
}
-- if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
-- I915_VBLANK_INTERRUPT_STATUS)) {
-- vblank = 1;
++ if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
++ PIPE_VBLANK_INTERRUPT_STATUS)) {
++ vblank++;
drm_handle_vblank(dev, i915_get_plane(dev, 0));
}
-- if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
-- I915_VBLANK_INTERRUPT_STATUS)) {
-- vblank = 1;
++
++ if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
++ PIPE_VBLANK_INTERRUPT_STATUS)) {
++ vblank++;
drm_handle_vblank(dev, i915_get_plane(dev, 1));
}
++
if (vblank) {
if (dev_priv->swaps_pending > 0)
drm_locked_tasklet(dev, i915_vblank_tasklet);
}
- if (dev->pdev->msi_enabled)
- I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
-
+ if ((iir & I915_DISPLAY_PORT_INTERRUPT) || hotplug) {
+ u32 temp2 = 0;
+
+ DRM_INFO("Hotplug event received\n");
+
+ if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev)) {
+ if (pipea_stats & PIPE_HOTPLUG_INTERRUPT_STATUS)
+ temp2 |= SDVOB_HOTPLUG_INT_STATUS |
+ SDVOC_HOTPLUG_INT_STATUS;
+ if (pipea_stats & PIPE_HOTPLUG_TV_INTERRUPT_STATUS)
+ temp2 |= TV_HOTPLUG_INT_STATUS;
+ } else {
+ temp2 = I915_READ(PORT_HOTPLUG_STAT);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, temp2);
+ }
+ i915_run_hotplug_tasklet(dev, temp2);
+ }
+
return IRQ_HANDLED;
}
return dev_priv->counter;
}
-void i915_user_irq_on(drm_i915_private_t *dev_priv)
+void i915_user_irq_on(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+
DRM_SPINLOCK(&dev_priv->user_irq_lock);
- if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
- dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- else
- I915_WRITE16(IMR, dev_priv->irq_mask_reg);
- I915_READ16(IMR);
- }
+ if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1))
+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
-
}
-
-void i915_user_irq_off(drm_i915_private_t *dev_priv)
+
+void i915_user_irq_off(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+
DRM_SPINLOCK(&dev_priv->user_irq_lock);
BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
- if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- dev_priv->irq_mask_reg |= I915_USER_INTERRUPT;
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- else
- I915_WRITE16(IMR, dev_priv->irq_mask_reg);
- I915_READ16(IMR);
- }
+ if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
/*
* Clear any pending status
*/
- pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
- I915_VBLANK_INTERRUPT_STATUS);
+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
}
+
DRM_SPINLOCK(&dev_priv->user_irq_lock);
- dev_priv->irq_mask_reg &= ~mask_reg;
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- else
- I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ i915_enable_irq(dev_priv, mask_reg);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
return 0;
}
DRM_SPINLOCK(&dev_priv->user_irq_lock);
- dev_priv->irq_mask_reg |= mask_reg;
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- else
- I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ i915_disable_irq(dev_priv, mask_reg);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
- if (pipestat_reg)
- {
+
+ if (pipestat_reg) {
pipestat = I915_READ (pipestat_reg);
- pipestat &= ~(I915_START_VBLANK_INTERRUPT_ENABLE |
- I915_VBLANK_INTERRUPT_ENABLE);
+ pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+ PIPE_VBLANK_INTERRUPT_ENABLE);
/*
* Clear any pending status
*/
}
}
-static void i915_enable_interrupt (struct drm_device *dev)
+void i915_enable_interrupt (struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ struct drm_connector *o;
-
- dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
+
- dev_priv->irq_mask_reg = ~0;
- I915_WRITE(I915REG_INT_MASK_R, dev_priv->irq_mask_reg);
- I915_WRITE(I915REG_INT_ENABLE_R, I915_INTERRUPT_ENABLE_MASK);
- (void) I915_READ (I915REG_INT_ENABLE_R);
- dev_priv->irq_enabled = 1;
-}
++ dev_priv->irq_mask_reg &= ~0;
-static void i915_disable_interrupt (struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- I915_WRITE(I915REG_HWSTAM, 0xffffffff);
- I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
- I915_WRITE(I915REG_INT_ENABLE_R, 0);
- I915_WRITE(I915REG_INT_IDENTITY_R, 0xffffffff);
- (void) I915_READ (I915REG_INT_IDENTITY_R);
- dev_priv->irq_enabled = 0;
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ if (dev->mode_config.num_connector)
+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
+ } else {
+ if (dev->mode_config.num_connector)
+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+
+ /* Enable global interrupts for hotplug - not a pipeA event */
+ I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) |
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_STATUS |
+ PIPE_HOTPLUG_INTERRUPT_STATUS);
+ }
+
+ if (!(dev_priv->irq_mask_reg & I915_DISPLAY_PORT_INTERRUPT) ||
+ !(dev_priv->irq_mask_reg & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
+ u32 temp = 0;
+
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ temp = I915_READ(PORT_HOTPLUG_EN);
+
+ /* Activate the CRT */
+ temp |= CRT_HOTPLUG_INT_EN;
+ }
+
+ if (IS_I9XX(dev)) {
+ /* SDVOB */
+ o = intel_sdvo_find(dev, 1);
+ if (o && intel_sdvo_supports_hotplug(o)) {
+ intel_sdvo_set_hotplug(o, 1);
+ temp |= SDVOB_HOTPLUG_INT_EN;
+ }
+
+ /* SDVOC */
+ o = intel_sdvo_find(dev, 0);
+ if (o && intel_sdvo_supports_hotplug(o)) {
+ intel_sdvo_set_hotplug(o, 1);
+ temp |= SDVOC_HOTPLUG_INT_EN;
+ }
+
+ I915_WRITE(SDVOB, I915_READ(SDVOB) | SDVO_INTERRUPT_ENABLE);
+ I915_WRITE(SDVOC, I915_READ(SDVOC) | SDVO_INTERRUPT_ENABLE);
+
+ /* TV */
+ I915_WRITE(TV_DAC, I915_READ(TV_DAC) | TVDAC_STATE_CHG_EN);
+ } else {
+ /* DVO ???? */
+ }
+
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, temp);
+
+ DRM_DEBUG("HEN %08x\n",I915_READ(PORT_HOTPLUG_EN));
+ DRM_DEBUG("HST %08x\n",I915_READ(PORT_HOTPLUG_STAT));
+
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+ }
+
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- I915_WRITE(IER, ~dev_priv->irq_mask_reg);
- } else {
- I915_WRITE16(IMR, dev_priv->irq_mask_reg);
- I915_WRITE16(IER, ~(u16)dev_priv->irq_mask_reg);
- }
-
++ I915_WRITE(IMR, dev_priv->irq_mask_reg);
++ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
++ (void) I915_READ (IER);
+ dev_priv->irq_enabled = 1;
}
/* Set the vblank monitor pipe
int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_vblank_pipe *pipe = data;
- u16 flag;
+ u32 flag = 0;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- flag = I915_READ(IER);
- else
- flag = I915_READ16(IER);
+ if (dev_priv->irq_enabled)
+ flag = ~dev_priv->irq_mask_reg;
+
pipe->pipe = 0;
if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
*/
void i915_driver_irq_preinstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(PIPEASTAT);
+ I915_WRITE(PIPEASTAT, tmp);
+ tmp = I915_READ(PIPEBSTAT);
+ I915_WRITE(PIPEBSTAT, tmp);
-
- I915_WRITE16(HWSTAM, 0xeffe);
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
- I915_WRITE(IMR, 0x0);
- I915_WRITE(IER, 0x0);
- tmp = I915_READ(IIR);
- I915_WRITE(IIR, tmp);
- } else {
- I915_WRITE16(IMR, 0x0);
- I915_WRITE16(IER, 0x0);
- tmp = I915_READ16(IIR);
- I915_WRITE16(IIR, tmp);
- }
+ atomic_set(&dev_priv->irq_received, 0);
- I915_WRITE(I915REG_HWSTAM, 0xffff);
- I915_WRITE(I915REG_INT_ENABLE_R, 0x0);
- I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
- I915_WRITE(I915REG_INT_IDENTITY_R, 0xffffffff);
- (void) I915_READ(I915REG_INT_IDENTITY_R);
++ I915_WRITE(HWSTAM, 0xffff);
++ I915_WRITE(IER, 0x0);
++ I915_WRITE(IMR, 0xffffffff);
++ I915_WRITE(IIR, 0xffffffff);
++ (void) I915_READ(IIR);
}
int i915_driver_irq_postinstall(struct drm_device * dev)