static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_engine_cs *ring)
{
+	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;
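
+	/* Allocate the ringbuffer struct lazily on first init; the error
+	 * path below and intel_cleanup_ring_buffer() free it again.
+	 */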
+	if (ringbuf == NULL) {
+		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+		if (!ringbuf)
+			return -ENOMEM;
+		ring->buffer = ringbuf;
+	}
+
	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
-			return ret;
+			goto error;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
-			return ret;
+			goto error;
	}
	ret = allocate_ring_buffer(ring);
	if (ret) {
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
			  ring->name, ret);
-		return ret;
+		goto error;
	}
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
-		return ret;
+		goto error;
+
-	return ring->init(ring);
+	ret = ring->init(ring);
+	if (ret)
+		goto error;
+
+	return 0;
+
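+	/* On failure, free the ringbuffer struct so that a later init
+	 * attempt starts again from scratch.
+	 */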
+error:
+	kfree(ringbuf);
+	ring->buffer = NULL;
+	return ret;
}
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
	cleanup_status_page(ring);
	i915_cmd_parser_fini_ring(ring);
+
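+	/* Free the ringbuffer struct allocated by the init paths above */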
+	kfree(ring->buffer);
+	ring->buffer = NULL;
}
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;
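
+	/* Same lazy allocation as in intel_init_ring_buffer(): the legacy
+	 * DRI path may be the first user of this engine's ringbuffer.
+	 */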
+	if (ringbuf == NULL) {
+		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+		if (!ringbuf)
+			return -ENOMEM;
+		ring->buffer = ringbuf;
+	}
+
	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_ringbuf;
	}
	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_ringbuf;
	}
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
-			return ret;
+			goto err_vstart;
	}
	return 0;
+
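+	/* Unwind in reverse order: unmap the ring first, then free the
+	 * ringbuffer struct allocated above.
+	 */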
+err_vstart:
+	iounmap(ring->virtual_start);
+err_ringbuf:
+	kfree(ringbuf);
+	ring->buffer = NULL;
+	return ret;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)

--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h

	bool deadlock;
};
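+/* The state that describes the ringbuffer itself, split out from
+ * struct intel_engine_cs so that engines and their buffers can be
+ * managed independently.
+ */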
+struct intel_ringbuffer {
+	struct drm_i915_gem_object *obj;
+	void __iomem *virtual_start;
+
+	u32 head;
+	u32 tail;
+	int space;
+	int size;
+	int effective_size;
+
+	/** We track the position of the requests in the ring buffer, and
+	 * when each is retired we increment last_retired_head as the GPU
+	 * must have finished processing the request and so we know we
+	 * can advance the ringbuffer up to that position.
+	 *
+	 * last_retired_head is set to -1 after the value is consumed so
+	 * we can detect new retirements.
+	 */
+	u32 last_retired_head;
+};
+
struct intel_engine_cs {
	const char *name;
	enum intel_ring_id {
	void __iomem *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;
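+	/* Backing ringbuffer state; the legacy obj/virtual_start/head/tail
+	 * fields above are kept in place for now.
+	 */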
+	struct intel_ringbuffer *buffer;
	u32 head;
	u32 tail;
static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
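+	/* Both the ringbuffer struct and its backing object must exist
+	 * before the engine counts as initialized.
+	 */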
-	return ring->obj != NULL;
+	return ring->buffer && ring->obj;
}
static inline unsigned