drm/i915: Show RING registers through debugfs
author: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 4 Oct 2016 20:11:31 +0000 (21:11 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Wed, 5 Oct 2016 07:40:06 +0000 (08:40 +0100)
Knowing where the RINGs are pointing is extremely useful in diagnosing
if the engines are executing the ringbuffers you expect - and igt may be
suppressing the usual method of looking in the GPU error state.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161004201132.21801-7-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index eb91444..fe2bc48 100644 (file)
@@ -645,6 +645,23 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
        return 0;
 }
 
+/*
+ * print_request - emit a one-line debugfs summary of a GEM request
+ * @m: seq_file to print into
+ * @rq: request to describe
+ * @prefix: string emitted before the summary (caller-chosen label/indent)
+ *
+ * Prints the request's seqno, a [hw_id:seqno] context tag, its age in
+ * milliseconds since emission, and the comm/pid of the submitting task.
+ */
+static void print_request(struct seq_file *m,
+                         struct drm_i915_gem_request *rq,
+                         const char *prefix)
+{
+       struct pid *pid = rq->ctx->pid;
+       struct task_struct *task;
+
+       /* pid is NULL for kernel-owned contexts; pid_task() requires RCU */
+       rcu_read_lock();
+       task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
+       /* NOTE(review): rq->fence.seqno is printed twice (bare and inside
+        * the [hw_id:seqno] tag) - confirm this is the intended format.
+        */
+       seq_printf(m, "%s%x [%x:%x] @ %d: %s [%d]\n", prefix,
+                  rq->fence.seqno, rq->ctx->hw_id, rq->fence.seqno,
+                  jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+                  task ? task->comm : "<unknown>",
+                  task ? task->pid : -1);
+       rcu_read_unlock();
+}
+
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -668,19 +685,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
                        continue;
 
                seq_printf(m, "%s requests: %d\n", engine->name, count);
-               list_for_each_entry(req, &engine->request_list, link) {
-                       struct pid *pid = req->ctx->pid;
-                       struct task_struct *task;
-
-                       rcu_read_lock();
-                       task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
-                       seq_printf(m, "    %x @ %d: %s [%d]\n",
-                                  req->fence.seqno,
-                                  (int) (jiffies - req->emitted_jiffies),
-                                  task ? task->comm : "<unknown>",
-                                  task ? task->pid : -1);
-                       rcu_read_unlock();
-               }
+               list_for_each_entry(req, &engine->request_list, link)
+                       print_request(m, req, "    ");
 
                any++;
        }
@@ -2046,84 +2052,6 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
        return 0;
 }
 
-static int i915_execlists(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_device *dev = &dev_priv->drm;
-       struct intel_engine_cs *engine;
-       u32 status_pointer;
-       u8 read_pointer;
-       u8 write_pointer;
-       u32 status;
-       u32 ctx_id;
-       struct list_head *cursor;
-       int i, ret;
-
-       if (!i915.enable_execlists) {
-               seq_puts(m, "Logical Ring Contexts are disabled\n");
-               return 0;
-       }
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       intel_runtime_pm_get(dev_priv);
-
-       for_each_engine(engine, dev_priv) {
-               struct drm_i915_gem_request *head_req = NULL;
-               int count = 0;
-
-               seq_printf(m, "%s\n", engine->name);
-
-               status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
-               ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
-               seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
-                          status, ctx_id);
-
-               status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
-               seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
-
-               read_pointer = GEN8_CSB_READ_PTR(status_pointer);
-               write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
-               if (read_pointer > write_pointer)
-                       write_pointer += GEN8_CSB_ENTRIES;
-               seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
-                          read_pointer, write_pointer);
-
-               for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
-                       status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
-                       ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
-
-                       seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
-                                  i, status, ctx_id);
-               }
-
-               spin_lock_bh(&engine->execlist_lock);
-               list_for_each(cursor, &engine->execlist_queue)
-                       count++;
-               head_req = list_first_entry_or_null(&engine->execlist_queue,
-                                                   struct drm_i915_gem_request,
-                                                   execlist_link);
-               spin_unlock_bh(&engine->execlist_lock);
-
-               seq_printf(m, "\t%d requests in queue\n", count);
-               if (head_req) {
-                       seq_printf(m, "\tHead request context: %u\n",
-                                  head_req->ctx->hw_id);
-                       seq_printf(m, "\tHead request tail: %u\n",
-                                  head_req->tail);
-               }
-
-               seq_putc(m, '\n');
-       }
-
-       intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-}
-
 static const char *swizzle_string(unsigned swizzle)
 {
        switch (swizzle) {
@@ -3136,6 +3064,133 @@ static int i915_display_info(struct seq_file *m, void *unused)
        return 0;
 }
 
+/*
+ * i915_engine_info - debugfs dump of per-engine execution state
+ *
+ * For each engine this prints: the breadcrumb/hangcheck seqnos, the first,
+ * last and currently active requests, the hardware RING_* registers next to
+ * the driver's own view of the ringbuffer, ACTHD/BBADDR, execlist CSB and
+ * ELSP state (or the legacy ppGTT registers), and the tasks currently
+ * waiting on the engine's breadcrumbs.
+ */
+static int i915_engine_info(struct seq_file *m, void *unused)
+{
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct intel_engine_cs *engine;
+
+       for_each_engine(engine, dev_priv) {
+               struct intel_breadcrumbs *b = &engine->breadcrumbs;
+               struct drm_i915_gem_request *rq;
+               struct rb_node *rb;
+               u64 addr;
+
+               seq_printf(m, "%s\n", engine->name);
+               seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
+                          intel_engine_get_seqno(engine),
+                          engine->last_submitted_seqno,
+                          engine->hangcheck.seqno,
+                          engine->hangcheck.score);
+
+               /* NOTE(review): the request list is walked without
+                * struct_mutex; presumably this relies on requests being
+                * freed via RCU so the entries stay safely readable under
+                * rcu_read_lock() - confirm.
+                */
+               rcu_read_lock();
+
+               seq_printf(m, "\tRequests:\n");
+
+               /* list_first_entry()/list_last_entry() on an empty list
+                * yield the list head itself, hence the
+                * &rq->link != &engine->request_list checks below.
+                */
+               rq = list_first_entry(&engine->request_list,
+                               struct drm_i915_gem_request, link);
+               if (&rq->link != &engine->request_list)
+                       print_request(m, rq, "\t\tfirst  ");
+
+               rq = list_last_entry(&engine->request_list,
+                               struct drm_i915_gem_request, link);
+               if (&rq->link != &engine->request_list)
+                       print_request(m, rq, "\t\tlast   ");
+
+               rq = i915_gem_find_active_request(engine);
+               if (rq) {
+                       print_request(m, rq, "\t\tactive ");
+                       seq_printf(m,
+                                  "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+                                  rq->head, rq->postfix, rq->tail,
+                                  rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+                                  rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+               }
+
+               /* Hardware register value first, driver's expectation in [];
+                * rq may be NULL here if no active request was found.
+                */
+               seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+                          I915_READ(RING_START(engine->mmio_base)),
+                          rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+               seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
+                          I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+                          rq ? rq->ring->head : 0);
+               seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
+                          I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+                          rq ? rq->ring->tail : 0);
+               seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
+                          I915_READ(RING_CTL(engine->mmio_base)),
+                          I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+               rcu_read_unlock();
+
+               addr = intel_engine_get_active_head(engine);
+               seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
+                          upper_32_bits(addr), lower_32_bits(addr));
+               addr = intel_engine_get_last_batch_head(engine);
+               seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+                          upper_32_bits(addr), lower_32_bits(addr));
+
+               if (i915.enable_execlists) {
+                       u32 ptr, read, write;
+
+                       seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
+                                  I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+                                  I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+                       ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+                       read = GEN8_CSB_READ_PTR(ptr);
+                       write = GEN8_CSB_WRITE_PTR(ptr);
+                       seq_printf(m, "\tExeclist CSB read %d, write %d\n",
+                                  read, write);
+                       /* Sanitise out-of-range pointers, then unwrap write
+                        * past read so the loop below walks the pending
+                        * entries in ring order.
+                        */
+                       if (read >= GEN8_CSB_ENTRIES)
+                               read = 0;
+                       if (write >= GEN8_CSB_ENTRIES)
+                               write = 0;
+                       if (read > write)
+                               write += GEN8_CSB_ENTRIES;
+                       while (read < write) {
+                               /* entries start at the slot after read ptr */
+                               unsigned int idx = ++read % GEN8_CSB_ENTRIES;
+
+                               seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+                                          idx,
+                                          I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+                                          I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+                       }
+
+                       /* The two ELSP ports hold the requests last
+                        * submitted to the hardware.
+                        */
+                       rcu_read_lock();
+                       rq = READ_ONCE(engine->execlist_port[0].request);
+                       if (rq)
+                               print_request(m, rq, "\t\tELSP[0] ");
+                       else
+                               seq_printf(m, "\t\tELSP[0] idle\n");
+                       rq = READ_ONCE(engine->execlist_port[1].request);
+                       if (rq)
+                               print_request(m, rq, "\t\tELSP[1] ");
+                       else
+                               seq_printf(m, "\t\tELSP[1] idle\n");
+                       rcu_read_unlock();
+               } else if (INTEL_GEN(dev_priv) > 6) {
+                       /* Legacy ringbuffer mode: dump per-ring ppGTT regs */
+                       seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+                                  I915_READ(RING_PP_DIR_BASE(engine)));
+                       seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+                                  I915_READ(RING_PP_DIR_BASE_READ(engine)));
+                       seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+                                  I915_READ(RING_PP_DIR_DCLV(engine)));
+               }
+
+               /* b->lock protects the rbtree of breadcrumb waiters */
+               spin_lock(&b->lock);
+               for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+                       struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+                       seq_printf(m, "\t%s [%d] waiting for %x\n",
+                                  w->tsk->comm, w->tsk->pid, w->seqno);
+               }
+               spin_unlock(&b->lock);
+
+               seq_puts(m, "\n");
+       }
+
+       return 0;
+}
+
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -5290,7 +5345,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
        {"i915_context_status", i915_context_status, 0},
        {"i915_dump_lrc", i915_dump_lrc, 0},
-       {"i915_execlists", i915_execlists, 0},
        {"i915_forcewake_domains", i915_forcewake_domains, 0},
        {"i915_swizzle_info", i915_swizzle_info, 0},
        {"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -5302,6 +5356,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_power_domain_info", i915_power_domain_info, 0},
        {"i915_dmc_info", i915_dmc_info, 0},
        {"i915_display_info", i915_display_info, 0},
+       {"i915_engine_info", i915_engine_info, 0},
        {"i915_semaphore_status", i915_semaphore_status, 0},
        {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
        {"i915_dp_mst_info", i915_dp_mst_info, 0},
index e405f10..d00ec80 100644 (file)
@@ -334,3 +334,33 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        intel_engine_cleanup_cmd_parser(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);
 }
+
+/**
+ * intel_engine_get_active_head - read the engine's current execution head
+ * @engine: engine to query
+ *
+ * Returns the ACTHD value: assembled from the lo/udw register pair on
+ * gen8+, read from the per-engine RING_ACTHD register on gen4+, and from
+ * the legacy global ACTHD register on older hardware.
+ */
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       u64 acthd;
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+                                        RING_ACTHD_UDW(engine->mmio_base));
+       else if (INTEL_GEN(dev_priv) >= 4)
+               acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+       else
+               acthd = I915_READ(ACTHD);
+
+       return acthd;
+}
+
+/**
+ * intel_engine_get_last_batch_head - read the engine's batch buffer address
+ * @engine: engine to query
+ *
+ * Returns the RING_BBADDR value; on gen8+ the 64bit address is assembled
+ * from the lo/udw register pair, otherwise a single 32bit read suffices.
+ */
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       u64 bbaddr;
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
+                                         RING_BBADDR_UDW(engine->mmio_base));
+       else
+               bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+
+       return bbaddr;
+}
index 8206e2a..35f359e 100644 (file)
@@ -405,22 +405,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
        return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       u64 acthd;
-
-       if (INTEL_GEN(dev_priv) >= 8)
-               acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
-                                        RING_ACTHD_UDW(engine->mmio_base));
-       else if (INTEL_GEN(dev_priv) >= 4)
-               acthd = I915_READ(RING_ACTHD(engine->mmio_base));
-       else
-               acthd = I915_READ(ACTHD);
-
-       return acthd;
-}
-
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
index 66553bd..498931f 100644 (file)
@@ -541,6 +541,8 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
 u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
        return intel_read_status_page(engine, I915_GEM_HWS_INDEX);