[gem] Manage the ringbuffer from the kernel in the GEM case.
authorEric Anholt <eric@anholt.net>
Tue, 10 Jun 2008 22:30:23 +0000 (15:30 -0700)
committerEric Anholt <eric@anholt.net>
Wed, 11 Jun 2008 05:57:07 +0000 (22:57 -0700)
This requires that the X Server use the execbuf interface for buffer
submission, as it no longer has direct access to the ring.  This is
therefore a flag day for the gem interface.

This also adds enter/leavevt ioctls for use by the X Server.  These would
be stubbed out in a modesetting implementation, but are required in an
environment where the DRM manages the device's state only while X holds
the VT.

linux-core/drmP.h
linux-core/drm_gem.c
linux-core/i915_gem.c
shared-core/i915_dma.c
shared-core/i915_drm.h
shared-core/i915_drv.h
shared-core/i915_irq.c

index 8246f44..6a7f28d 100644 (file)
@@ -1371,6 +1371,9 @@ drm_gem_init (struct drm_device *dev);
 void
 drm_gem_object_free (struct kref *kref);
 
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size);
+
 void
 drm_gem_object_handle_free (struct kref *kref);
     
index b726e59..a8ecaf7 100644 (file)
@@ -80,7 +80,7 @@ drm_gem_init(struct drm_device *dev)
 /**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
-static struct drm_gem_object *
+struct drm_gem_object *
 drm_gem_object_alloc(struct drm_device *dev, size_t size)
 {
        struct drm_gem_object *obj;
@@ -117,6 +117,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
        atomic_inc(&dev->object_count);
        return obj;
 }
+EXPORT_SYMBOL(drm_gem_object_alloc);
 
 /**
  * Removes the mapping from handle to filp for this object.
index d60a98f..2564f41 100644 (file)
@@ -702,14 +702,17 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
        BUG_ON(obj_priv->page_list != NULL);
        obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
                                         DRM_MEM_DRIVER);
-       if (obj_priv->page_list == NULL)
+       if (obj_priv->page_list == NULL) {
+               DRM_ERROR("Failed to allocate page list\n");
                return -ENOMEM;
+       }
 
        for (i = 0; i < page_count; i++) {
                obj_priv->page_list[i] =
                    find_or_create_page(obj->filp->f_mapping, i, GFP_HIGHUSER);
 
                if (obj_priv->page_list[i] == NULL) {
+                       DRM_ERROR("Failed to find_or_create_page()\n");
                        i915_gem_object_free_page_list(obj);
                        return -ENOMEM;
                }
@@ -758,14 +761,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
                DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
                if (list_empty(&dev_priv->mm.inactive_list) &&
+                   list_empty(&dev_priv->mm.flushing_list) &&
                    list_empty(&dev_priv->mm.active_list)) {
                        DRM_ERROR("GTT full, but LRU list empty\n");
                        return -ENOMEM;
                }
 
                ret = i915_gem_evict_something(dev);
-               if (ret != 0)
+               if (ret != 0) {
+                       DRM_ERROR("Failed to evict a buffer\n");
                        return ret;
+               }
                goto search_free;
        }
 
@@ -1383,6 +1389,7 @@ int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
 {
+       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_gem_object **object_list = NULL;
@@ -1423,6 +1430,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
        mutex_lock(&dev->struct_mutex);
 
+       if (dev_priv->mm.suspended) {
+               DRM_ERROR("Execbuf while VT-switched.\n");
+               mutex_unlock(&dev->struct_mutex);
+               return -EBUSY;
+       }
+
        /* Zero the gloabl flush/invalidate flags. These
         * will be modified as each object is bound to the
         * gtt
@@ -1560,6 +1573,37 @@ pre_mutex_err:
 }
 
 int
+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int ret;
+
+       /* Bind on first pin.  On failure, only report the error:
+        * dropping the caller's reference and struct_mutex here made
+        * i915_gem_pin_ioctl() unreference and unlock a second time,
+        * and i915_gem_init_ringbuffer() calls this without holding
+        * struct_mutex at all.
+        */
+       if (obj_priv->gtt_space == NULL) {
+               ret = i915_gem_object_bind_to_gtt(obj, alignment);
+               if (ret != 0) {
+                       DRM_ERROR("Failure to bind: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       obj_priv->pin_count++;
+       return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_gem_object *obj)
+{
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       obj_priv->pin_count--;
+}
+
+int
 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
 {
@@ -1578,22 +1622,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
-
        obj_priv = obj->driver_private;
-       if (obj_priv->gtt_space == NULL) {
-               ret = i915_gem_object_bind_to_gtt(obj,
-                                                 (unsigned) args->alignment);
-               if (ret != 0) {
-                       DRM_ERROR("Failure to bind in "
-                                 "i915_gem_pin_ioctl(): %d\n",
-                                 ret);
-                       drm_gem_object_unreference(obj);
-                       mutex_unlock(&dev->struct_mutex);
-                       return ret;
-               }
+
+       ret = i915_gem_object_pin(obj, args->alignment);
+       if (ret != 0) {
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
        }
 
-       obj_priv->pin_count++;
        args->offset = obj_priv->gtt_offset;
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
@@ -1607,7 +1644,6 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
 
        mutex_lock(&dev->struct_mutex);
 
@@ -1620,8 +1656,8 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       obj_priv = obj->driver_private;
-       obj_priv->pin_count--;
+       i915_gem_object_unpin(obj);
+
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return 0;
@@ -1757,3 +1793,173 @@ i915_gem_lastclose(struct drm_device *dev)
 
        mutex_unlock(&dev->struct_mutex);
 }
+
+static int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       obj = drm_gem_object_alloc(dev, 128 * 1024);
+       if (obj == NULL) {
+               DRM_ERROR("Failed to allocate ringbuffer\n");
+               return -ENOMEM;
+       }
+       obj_priv = obj->driver_private;
+
+       ret = i915_gem_object_pin(obj, 4096);
+       if (ret != 0)
+               return ret;
+
+       /* Set up the kernel mapping for the ring. */
+       dev_priv->ring.Size = obj->size;
+       dev_priv->ring.tail_mask = obj->size - 1;
+
+       dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
+       dev_priv->ring.map.size = obj->size;
+       dev_priv->ring.map.type = 0;
+       dev_priv->ring.map.flags = 0;
+       dev_priv->ring.map.mtrr = 0;
+
+       drm_core_ioremap(&dev_priv->ring.map, dev);
+       if (dev_priv->ring.map.handle == NULL) {
+               DRM_ERROR("Failed to map ringbuffer.\n");
+               memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(obj);
+               return -EINVAL;
+       }
+       dev_priv->ring.ring_obj = obj;
+       dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+       /* Stop the ring if it's running. */
+       I915_WRITE(LP_RING + RING_LEN, 0);
+       I915_WRITE(LP_RING + RING_HEAD, 0);
+       I915_WRITE(LP_RING + RING_TAIL, 0);
+       I915_WRITE(LP_RING + RING_START, 0);
+
+       /* Initialize the ring. */
+       I915_WRITE(LP_RING + RING_START, obj_priv->gtt_offset);
+       I915_WRITE(LP_RING + RING_LEN,
+                  ((obj->size - 4096) & RING_NR_PAGES) |
+                  RING_NO_REPORT |
+                  RING_VALID);
+
+       /* Update our cache of the ring state */
+       i915_kernel_lost_context(dev);
+
+       return 0;
+}
+
+static void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       if (dev_priv->ring.ring_obj == NULL)
+               return;
+
+       drm_core_ioremapfree(&dev_priv->ring.map, dev);
+
+       i915_gem_object_unpin(dev_priv->ring.ring_obj);
+       drm_gem_object_unreference(dev_priv->ring.ring_obj);
+
+       memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+}
+
+int
+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = i915_gem_init_ringbuffer(dev);
+       if (ret != 0)
+               return ret;
+
+       mutex_lock(&dev->struct_mutex);
+       dev_priv->mm.suspended = 0;
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
+
+/** Unbinds all objects that are on the given buffer list. */
+static int
+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+{
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       while (!list_empty(head)) {
+               obj_priv = list_first_entry(head,
+                                           struct drm_i915_gem_object,
+                                           list);
+               obj = obj_priv->obj;
+
+               if (obj_priv->pin_count != 0) {
+                       DRM_ERROR("Pinned object in unbind list\n");
+                       /* struct_mutex stays held; the caller unlocks. */
+                       return -EINVAL;
+               }
+
+               ret = i915_gem_object_unbind(obj);
+               if (ret != 0) {
+                       DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
+                                 ret);
+                       /* struct_mutex stays held; the caller unlocks. */
+                       return ret;
+               }
+       }
+
+
+       return 0;
+}
+
+int
+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       mutex_lock(&dev->struct_mutex);
+       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
+        * We need to replace this with a semaphore, or something.
+        */
+       dev_priv->mm.suspended = 1;
+
+       /* Move all buffers out of the GTT. */
+       i915_gem_evict_from_list(dev, &dev_priv->mm.active_list);
+       i915_gem_evict_from_list(dev, &dev_priv->mm.flushing_list);
+       i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+
+       /* Make sure the hardware's idle. */
+       while (!list_empty(&dev_priv->mm.request_list)) {
+               struct drm_i915_gem_request *request;
+               int ret;
+
+               request = list_first_entry(&dev_priv->mm.request_list,
+                                          struct drm_i915_gem_request,
+                                          list);
+
+               ret = i915_wait_request(dev, request->seqno);
+               if (ret != 0) {
+                       DRM_ERROR("Error waiting for idle at LeaveVT: %d\n",
+                                 ret);
+                       mutex_unlock(&dev->struct_mutex);
+                       return ret;
+               }
+       }
+
+       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+       BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+
+       i915_gem_cleanup_ringbuffer(dev);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
index e5c6d0c..4243b4e 100644 (file)
@@ -255,27 +255,27 @@ static int i915_initialize(struct drm_device * dev,
                dev_priv->sarea_priv = NULL;
        }
 
-       dev_priv->ring.Start = init->ring_start;
-       dev_priv->ring.End = init->ring_end;
-       dev_priv->ring.Size = init->ring_size;
-       dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+       if (init->ring_size != 0) {
+               dev_priv->ring.Size = init->ring_size;
+               dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
 
-       dev_priv->ring.map.offset = init->ring_start;
-       dev_priv->ring.map.size = init->ring_size;
-       dev_priv->ring.map.type = 0;
-       dev_priv->ring.map.flags = 0;
-       dev_priv->ring.map.mtrr = 0;
+               dev_priv->ring.map.offset = init->ring_start;
+               dev_priv->ring.map.size = init->ring_size;
+               dev_priv->ring.map.type = 0;
+               dev_priv->ring.map.flags = 0;
+               dev_priv->ring.map.mtrr = 0;
 
-       drm_core_ioremap(&dev_priv->ring.map, dev);
+               drm_core_ioremap(&dev_priv->ring.map, dev);
 
-       if (dev_priv->ring.map.handle == NULL) {
-               i915_dma_cleanup(dev);
-               DRM_ERROR("can not ioremap virtual address for"
-                         " ring buffer\n");
-               return -ENOMEM;
-       }
+               if (dev_priv->ring.map.handle == NULL) {
+                       i915_dma_cleanup(dev);
+                       DRM_ERROR("can not ioremap virtual address for"
+                                 " ring buffer\n");
+                       return -ENOMEM;
+               }
 
-       dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+               dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+       }
 
        dev_priv->cpp = init->cpp;
 
@@ -1188,6 +1188,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
index 0fa292d..ce01640 100644 (file)
@@ -182,6 +182,8 @@ typedef struct drm_i915_sarea {
 #define DRM_I915_GEM_UNPIN     0x16
 #define DRM_I915_GEM_BUSY      0x17
 #define DRM_I915_GEM_THROTTLE  0x18
+#define DRM_I915_GEM_ENTERVT   0x19
+#define DRM_I915_GEM_LEAVEVT   0x1a
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -207,6 +209,8 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_UNPIN       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
 #define DRM_IOCTL_I915_GEM_THROTTLE    DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT     DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT     DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
 
 /* Asynchronous page flipping:
  */
index 8acef0c..3a22ae3 100644 (file)
@@ -74,14 +74,13 @@ struct drm_i915_validate_buffer;
 
 typedef struct _drm_i915_ring_buffer {
        int tail_mask;
-       unsigned long Start;
-       unsigned long End;
        unsigned long Size;
        u8 *virtual_start;
        int head;
        int tail;
        int space;
        drm_local_map_t map;
+       struct drm_gem_object *ring_obj;
 } drm_i915_ring_buffer_t;
 
 struct mem_block {
@@ -290,6 +289,16 @@ typedef struct drm_i915_private {
                struct work_struct retire_task;
                
                uint32_t next_gem_seqno;
+
+               /**
+                * Flag if the X Server, and thus DRM, is not currently in
+                * control of the device.
+                *
+                * This is set between LeaveVT and EnterVT.  It needs to be
+                * replaced with a semaphore.  It also needs to be
+                * transitioned away from for kernel modesetting.
+                */
+               int suspended;
        } mm;
 
        struct work_struct user_interrupt_task;
@@ -462,8 +471,14 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_set_domain(struct drm_gem_object *obj,
                        struct drm_file *file_priv,
                        uint32_t read_domains,
index 37d85f4..077af1a 100644 (file)
@@ -575,6 +575,11 @@ int i915_wait_irq(struct drm_device * dev, int irq_nr)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = 0;
 
+       if (!dev_priv) {
+               DRM_ERROR("called with no initialization\n");
+               return -EINVAL;
+       }
+
        DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));