void i915_gem_suspend(struct drm_i915_private *i915)
{
- GEM_TRACE("\n");
+ GEM_TRACE("%s\n", dev_name(i915->drm.dev));
intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
void i915_gem_resume(struct drm_i915_private *i915)
{
- GEM_TRACE("\n");
+ GEM_TRACE("%s\n", dev_name(i915->drm.dev));
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
if (err)
goto err;
- GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
- ce->engine->name, ce->timeline->fence_context,
- ce->ring->head, ce->ring->tail);
+ CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+ ce->ring->head, ce->ring->tail);
i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
if (likely(atomic_dec_and_test(&ce->pin_count))) {
- GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->timeline->fence_context);
+ CE_TRACE(ce, "retire\n");
ce->ops->unpin(ce);
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
- GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->timeline->fence_context);
+ CE_TRACE(ce, "retire\n");
set_bit(CONTEXT_VALID_BIT, &ce->flags);
if (ce->state)
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
+#define CE_TRACE(ce, fmt, ...) do { \
+ const struct intel_context *ce__ = (ce); \
+ ENGINE_TRACE(ce__->engine, "context:%llx" fmt, \
+ ce__->timeline->fence_context, \
+ ##__VA_ARGS__); \
+} while (0)
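+
+/*
+ * Illustrative only (values are made up): with the macro above,
+ *   CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n", head, tail);
+ * expands to an ENGINE_TRACE() carrying the context's fence id first,
+ * printing something like
+ *   "0000:00:02.0 rcs0: context:42 pin ring:{head:0000, tail:0120}"
+ */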
+
void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
+#define ENGINE_TRACE(e, fmt, ...) do { \
+ const struct intel_engine_cs *e__ __maybe_unused = (e); \
+ GEM_TRACE("%s %s: " fmt, \
+ dev_name(e__->i915->drm.dev), e__->name, \
+ ##__VA_ARGS__); \
+} while (0)
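+
+/*
+ * e__ latches the engine expression locally so it is evaluated exactly
+ * once, and is marked __maybe_unused so the expansion still compiles
+ * when GEM_TRACE() itself is compiled out. A bare
+ * ENGINE_TRACE(engine, "\n") prints something like "0000:00:02.0 rcs0: ".
+ */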
+
/*
* The register defines to be used with the following macros need to accept a
* base param, e.g:
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
mode, MODE_IDLE, MODE_IDLE,
1000, stop_timeout(engine),
NULL)) {
- GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+ ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
err = -ETIMEDOUT;
}
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
container_of(wf, typeof(*engine), wakeref);
void *map;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
intel_gt_pm_get(engine->gt);
{
struct intel_gt_timelines *timelines = &engine->gt->timelines;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* We have to serialise all potential retirement paths with our
if (!switch_to_kernel_context(engine))
return -EBUSY;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
call_idle_barriers(engine); /* cleanup after wedging */
struct drm_i915_private;
+#define GT_TRACE(gt__, fmt, ...) do { \
+ typecheck(struct intel_gt, *(gt__)); \
+ GEM_TRACE("%s " fmt, dev_name(gt->i915->drm.dev), \
+ ##__VA_ARGS__); \
+} while (0)
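+
+/*
+ * The typecheck() line makes GT_TRACE() refuse anything that is not a
+ * struct intel_gt pointer at compile time; e.g. (output illustrative)
+ *   GT_TRACE(gt, "force:%s\n", yesno(force));
+ * prints something like "0000:00:02.0 force:yes".
+ */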
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
i915_globals_unpark();
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
intel_gt_park_requests(gt);
enum intel_engine_id id;
intel_wakeref_t wakeref;
- GEM_TRACE("force:%s\n", yesno(force));
+ GT_TRACE(gt, "force:%s", yesno(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
enum intel_engine_id id;
int err = 0;
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
/*
* After resume, we may need to poke into the pinned kernel
intel_gt_sanitize(gt, false);
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
}
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_uc_runtime_suspend(&gt->uc);
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
}
int intel_gt_runtime_resume(struct intel_gt *gt)
{
- GEM_TRACE("\n");
-
+ GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
return intel_uc_runtime_resume(&gt->uc);
* remain correctly ordered. And we defer to __i915_request_submit()
* so that all asynchronous waits are correctly handled.
*/
- GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
- __func__, engine->name, rq->fence.context, rq->fence.seqno);
+ ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
+ rq->fence.context, rq->fence.seqno);
/* On resubmission of the active request, payload will be scrubbed */
if (i915_request_completed(rq))
if (!ports[0])
return;
- GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
- engine->name, msg,
- ports[0]->fence.context,
- ports[0]->fence.seqno,
- i915_request_completed(ports[0]) ? "!" :
- i915_request_started(ports[0]) ? "*" :
- "",
- ports[1] ? ports[1]->fence.context : 0,
- ports[1] ? ports[1]->fence.seqno : 0);
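+	/* "!" marks a completed request, "*" one that has started on HW */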
+ ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
+ ports[0]->fence.context,
+ ports[0]->fence.seqno,
+ i915_request_completed(ports[0]) ? "!" :
+ i915_request_started(ports[0]) ? "*" :
+ "",
+ ports[1] ? ports[1]->fence.context : 0,
+ ports[1] ? ports[1]->fence.seqno : 0);
}
static __maybe_unused bool
last = last_active(execlists);
if (last) {
if (need_preempt(engine, last, rb)) {
- GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n",
- engine->name,
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
+ ENGINE_TRACE(engine,
+ "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
record_preemption(execlists);
/*
last = NULL;
} else if (need_timeslice(engine, last) &&
timer_expired(&engine->execlists.timer)) {
- GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
- engine->name,
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
+ ENGINE_TRACE(engine,
+ "expired last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
ring_set_paused(engine, 1);
defer_active(engine);
return; /* leave this for another */
}
- GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- yesno(engine != ve->siblings[0]));
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
ve->request = NULL;
ve->base.execlists.queue_priority_hint = INT_MIN;
* interrupt for secondary ports).
*/
execlists->queue_priority_hint = queue_prio(execlists);
- GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n",
- engine->name, execlists->queue_priority_hint,
- yesno(submit));
if (submit) {
*port = execlists_schedule_in(last, port - execlists->pending);
*/
head = execlists->csb_head;
tail = READ_ONCE(*execlists->csb_write);
- GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
if (unlikely(head == tail))
return;
* status notifier.
*/
- GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
- engine->name, head,
- buf[2 * head + 0], buf[2 * head + 1]);
+ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ head, buf[2 * head + 0], buf[2 * head + 1]);
if (INTEL_GEN(engine->i915) >= 12)
promote = gen12_csb_parse(execlists, buf + 2 * head);
/* Mark this tasklet as disabled to avoid waiting for it to complete */
tasklet_disable_nosync(&engine->execlists.tasklet);
- GEM_TRACE("%s: preempt timeout %lu+%ums\n",
- engine->name,
- READ_ONCE(engine->props.preempt_timeout_ms),
- jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+ ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
+ READ_ONCE(engine->props.preempt_timeout_ms),
+ jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
intel_engine_reset(engine, "preemption time out");
tasklet_enable(&engine->execlists.tasklet);
struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
- GEM_TRACE("%s: depth<-%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth<-%d\n",
+ atomic_read(&execlists->tasklet.count));
/*
* Prevent request submission to the hardware until we have
restore_default_state(ce, engine);
out_replay:
- GEM_TRACE("%s replay {head:%04x, tail:%04x}\n",
- engine->name, ce->ring->head, ce->ring->tail);
+ ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ ce->ring->head, ce->ring->tail);
intel_ring_update_space(ce->ring);
__execlists_reset_reg_state(ce, engine);
__execlists_update_reg_state(ce, engine);
{
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
spin_lock_irqsave(&engine->active.lock, flags);
struct rb_node *rb;
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Before we call engine->cancel_requests(), we should have exclusive
if (__tasklet_enable(&execlists->tasklet))
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&execlists->tasklet);
- GEM_TRACE("%s: depth->%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
}
static int gen8_emit_bb_start(struct i915_request *rq,
mask = ve->siblings[0]->mask;
}
- GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n",
- ve->base.name,
- rq->fence.context, rq->fence.seqno,
- mask, ve->base.execlists.queue_priority_hint);
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
return mask;
}
struct i915_request *old;
unsigned long flags;
- GEM_TRACE("%s: rq=%llx:%lld\n",
- ve->base.name,
- rq->fence.context,
- rq->fence.seqno);
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+ rq->fence.context,
+ rq->fence.seqno);
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
bool uses_guc = intel_engine_in_guc_submission_mode(engine);
int ret;
- GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
+ ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
if (!intel_engine_pm_get_if_awake(engine))
struct intel_ring *ring = engine->legacy.ring;
int ret = 0;
- GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
- engine->name, ring->head, ring->tail);
+ ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
+ ring->head, ring->tail);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
*
* FIXME: Wa for more modern gens needs to be validated
*/
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
if (intel_engine_stop_cs(engine))
- GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
+ ENGINE_TRACE(engine, "timed out on STOP_RING\n");
intel_uncore_write_fw(uncore,
RING_HEAD(base),
/* Check acts as a post */
if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- GEM_TRACE("%s: ring head [%x] not parked\n",
- engine->name,
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
+ ENGINE_TRACE(engine, "ring head [%x] not parked\n",
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
static void reset_ring(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Prevent request submission to the hardware until we have
struct rb_node *rb;
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Before we call engine->cancel_requests(), we should have exclusive
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&execlists->tasklet);
- GEM_TRACE("%s: depth->%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
}
/*
if (!i915_request_completed(rq))
return false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ RQ_TRACE(rq, "\n");
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
trace_i915_request_retire(rq);
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ RQ_TRACE(rq, "\n");
GEM_BUG_ON(!i915_request_completed(rq));
struct intel_engine_cs *engine = request->engine;
bool result = false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
struct intel_ring *ring = rq->ring;
u32 *cs;
- GEM_TRACE("%s fence %llx:%lld\n",
- engine->name, rq->fence.context, rq->fence.seqno);
+ RQ_TRACE(rq, "\n");
/*
* To ensure that this call will not fail, space for its emissions
struct i915_vma *vma;
};
+#define RQ_TRACE(rq, fmt, ...) do { \
+ const struct i915_request *rq__ = (rq); \
+ ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d" fmt, \
+ rq__->fence.context, rq__->fence.seqno, \
+ hwsp_seqno(rq__), ##__VA_ARGS__); \
+} while (0)
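+
+/*
+ * RQ_TRACE(rq, "\n") thus logs the request's fence id alongside the
+ * breadcrumb currently visible in the HWSP, printing something like
+ *   "0000:00:02.0 rcs0: fence 42:8, current 7"
+ * which shows how far the CS has actually progressed relative to rq.
+ */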
+
enum {
/*
* I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.