resubmit_virtual_request(rq, ve);
if (READ_ONCE(ve->request))
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
+ tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
}
static void __execlists_schedule_out(struct i915_request * const rq,
}
static bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
+reset_in_progress(const struct intel_engine_cs *engine)
{
- return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+ return unlikely(!__tasklet_is_enabled(&engine->sched_engine->tasklet));
}
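
The helper above leans on an i915 convention the rest of the patch keeps using: the reset path disables the submission tasklet, so "tasklet disabled" doubles as the reset-in-progress flag. A minimal sketch of that lifecycle against the stock tasklet API; the example_* names are illustrative, not i915 symbols.

	static void example_reset_prepare(struct tasklet_struct *t)
	{
		/* Bump the disable count; submission is now fenced off. */
		tasklet_disable_nosync(t);
	}

	static bool example_reset_in_progress(const struct tasklet_struct *t)
	{
		/* Mirrors __tasklet_is_enabled(): non-zero count == disabled. */
		return atomic_read(&t->count) != 0;
	}

	static void example_reset_finish(struct tasklet_struct *t)
	{
		tasklet_enable(t);	/* drop the disable count */
		tasklet_hi_schedule(t);	/* kick in case work queued up meanwhile */
	}
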
static __maybe_unused noinline bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
		     const char *msg)
{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
trace_ports(execlists, msg, execlists->pending);
/* We may be messing around with the lists during reset, lalala */
- if (reset_in_progress(execlists))
+ if (reset_in_progress(engine))
return true;
if (!execlists->pending[0]) {
* its timeslice, so recheck.
*/
if (!timer_pending(&el->timer))
- tasklet_hi_schedule(&el->tasklet);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
return;
}
* access. Either we are inside the tasklet, or the tasklet is disabled
* and we assume that is only inside the reset paths and so serialised.
*/
- GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
- !reset_in_progress(execlists));
+ GEM_BUG_ON(!tasklet_is_locked(&engine->sched_engine->tasklet) &&
+ !reset_in_progress(engine));
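
The assertion above encodes the two legal callers of this CSB-processing path: the submission tasklet itself, or a reset path that has disabled it. tasklet_is_locked() is an i915 helper; a sketch of what it is assumed to check, based on the core TASKLET_STATE_RUN bit that is set while a tasklet callback runs:

	static inline bool example_tasklet_is_locked(const struct tasklet_struct *t)
	{
		/* TASKLET_STATE_RUN is held for the duration of the callback. */
		return test_bit(TASKLET_STATE_RUN, &t->state);
	}
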
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
if (rq_prio(rq) > engine->sched_engine->queue_priority_hint) {
engine->sched_engine->queue_priority_hint = rq_prio(rq);
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
}
spin_unlock_irq(&engine->sched_engine->lock);
ENGINE_TRACE(engine, "reset for %s\n", msg);
/* Mark this tasklet as disabled to avoid waiting for it to complete */
- tasklet_disable_nosync(&engine->execlists.tasklet);
+ tasklet_disable_nosync(&engine->sched_engine->tasklet);
ring_set_paused(engine, 1); /* Freeze the current request in place */
execlists_capture(engine);
intel_engine_reset(engine, msg);
- tasklet_enable(&engine->execlists.tasklet);
+ tasklet_enable(&engine->sched_engine->tasklet);
clear_and_wake_up_bit(bit, lock);
}
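
Worth noting in the hunk above: the _nosync variant matters because, per the in-line comment, this can run from inside the submission tasklet, where plain tasklet_disable() would spin waiting for the current callback (itself) to finish. A one-function sketch of the distinction:

	static void example_disable_from_callback(struct tasklet_struct *t)
	{
		/*
		 * tasklet_disable() waits for a running callback to return,
		 * which would self-deadlock here; the _nosync variant only
		 * increments the disable count and returns immediately.
		 */
		tasklet_disable_nosync(t);
	}
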
*/
static void execlists_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_engine_cs * const engine =
- from_tasklet(engine, t, execlists.tasklet);
+ struct i915_sched_engine *sched_engine =
+ from_tasklet(sched_engine, t, tasklet);
+ struct intel_engine_cs * const engine = sched_engine->private_data;
struct i915_request *post[2 * EXECLIST_MAX_PORTS];
struct i915_request **inactive;
intel_engine_signal_breadcrumbs(engine);
if (tasklet)
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
}
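
The hunk above shows the conversion pattern this patch applies to every tasklet callback: the tasklet now lives in i915_sched_engine, so the callback recovers the sched_engine with from_tasklet() (a container_of() over the named member, from include/linux/interrupt.h) and then hops back to the engine through the private_data back-pointer filled in at setup. A condensed sketch of the round trip:

	static void example_submission_tasklet(struct tasklet_struct *t)
	{
		/* container_of(t, struct i915_sched_engine, tasklet) */
		struct i915_sched_engine *sched_engine =
			from_tasklet(sched_engine, t, tasklet);
		/* back-pointer assigned when the sched_engine was created */
		struct intel_engine_cs *engine = sched_engine->private_data;

		/* ... dequeue from sched_engine, submit via engine ... */
	}
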
static void __execlists_kick(struct intel_engine_execlists *execlists)
{
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
/* Kick the tasklet for some interrupt coalescing and reset handling */
- tasklet_hi_schedule(&execlists->tasklet);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
}
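
__execlists_kick() keeps its execlists argument because the timeslice and preempt timers still live in intel_engine_execlists (see the execlists_kick() macro below and the timer_setup() calls later in the patch); only the tasklet moved. A sketch of the two container_of() hops a timer expiry now takes to reach it, assuming the standard member names:

	static void example_timer_kick(struct timer_list *timer)
	{
		struct intel_engine_execlists *el =
			container_of(timer, struct intel_engine_execlists, timer);
		struct intel_engine_cs *engine =
			container_of(el, struct intel_engine_cs, execlists);

		tasklet_hi_schedule(&engine->sched_engine->tasklet);
	}
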
#define execlists_kick(t, member) \
static void execlists_reset_prepare(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
ENGINE_TRACE(engine, "depth<-%d\n",
- atomic_read(&execlists->tasklet.count));
+ atomic_read(&engine->sched_engine->tasklet.count));
/*
* Prevent request submission to the hardware until we have
- * Turning off the execlists->tasklet until the reset is over
+ * Turning off the sched_engine->tasklet until the reset is over
* prevents the race.
*/
- __tasklet_disable_sync_once(&execlists->tasklet);
- GEM_BUG_ON(!reset_in_progress(execlists));
+ __tasklet_disable_sync_once(&engine->sched_engine->tasklet);
+ GEM_BUG_ON(!reset_in_progress(engine));
/*
* We stop engines, otherwise we might get failed reset and a
static void nop_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_engine_cs * const engine =
- from_tasklet(engine, t, execlists.tasklet);
+ struct i915_sched_engine *sched_engine =
+ from_tasklet(sched_engine, t, tasklet);
+ struct intel_engine_cs * const engine = sched_engine->private_data;
/* The driver is wedged; don't process any more events. */
WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
sched_engine->queue_priority_hint = INT_MIN;
sched_engine->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.callback = nop_submission_tasklet;
+ GEM_BUG_ON(__tasklet_is_enabled(&engine->sched_engine->tasklet));
+ engine->sched_engine->tasklet.callback = nop_submission_tasklet;
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
rcu_read_unlock();
* reset as the next level of recovery, and as a final resort we
* will declare the device wedged.
*/
- GEM_BUG_ON(!reset_in_progress(execlists));
+ GEM_BUG_ON(!reset_in_progress(engine));
/* And kick in case we missed a new request submission. */
- if (__tasklet_enable(&execlists->tasklet))
+ if (__tasklet_enable(&engine->sched_engine->tasklet))
__execlists_kick(execlists);
ENGINE_TRACE(engine, "depth->%d\n",
- atomic_read(&execlists->tasklet.count));
+ atomic_read(&engine->sched_engine->tasklet.count));
}
static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
* so kiss.
*/
if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&sched_engine->tasklet);
unlock:
rcu_read_unlock();
engine->submit_request = execlists_submit_request;
engine->sched_engine->schedule = i915_schedule;
engine->sched_engine->kick_backend = kick_execlists;
- engine->execlists.tasklet.callback = execlists_submission_tasklet;
+ engine->sched_engine->tasklet.callback = execlists_submission_tasklet;
}
static void execlists_shutdown(struct intel_engine_cs *engine)
/* Synchronise with residual timers and any softirq they raise */
del_timer_sync(&engine->execlists.timer);
del_timer_sync(&engine->execlists.preempt);
- tasklet_kill(&engine->execlists.tasklet);
+ tasklet_kill(&engine->sched_engine->tasklet);
}
static void execlists_release(struct intel_engine_cs *engine)
struct intel_uncore *uncore = engine->uncore;
u32 base = engine->mmio_base;
- tasklet_setup(&engine->execlists.tasklet, execlists_submission_tasklet);
+ tasklet_setup(&engine->sched_engine->tasklet, execlists_submission_tasklet);
timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
* rbtrees as in the case it is running in parallel, it may reinsert
* the rb_node into a sibling.
*/
- tasklet_kill(&ve->base.execlists.tasklet);
+ tasklet_kill(&ve->base.sched_engine->tasklet);
/* Decouple ourselves from the siblings, no more access allowed. */
for (n = 0; n < ve->num_siblings; n++) {
spin_lock_irq(&sibling->sched_engine->lock);
- /* Detachment is lazily performed in the execlists tasklet */
+ /* Detachment is lazily performed in the sched_engine->tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
spin_unlock_irq(&sibling->sched_engine->lock);
}
- GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+ GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.sched_engine->tasklet));
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
lrc_fini(&ve->context);
static void virtual_submission_tasklet(struct tasklet_struct *t)
{
+ struct i915_sched_engine *sched_engine =
+ from_tasklet(sched_engine, t, tasklet);
struct virtual_engine * const ve =
- from_tasklet(ve, t, base.execlists.tasklet);
- const int prio = READ_ONCE(ve->base.sched_engine->queue_priority_hint);
+ (struct virtual_engine *)sched_engine->private_data;
+ const int prio = READ_ONCE(sched_engine->queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
node->prio = prio;
if (first && prio > sibling->sched_engine->queue_priority_hint)
- tasklet_hi_schedule(&sibling->execlists.tasklet);
+ tasklet_hi_schedule(&sibling->sched_engine->tasklet);
unlock_engine:
spin_unlock_irq(&sibling->sched_engine->lock);
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
list_move_tail(&rq->sched.link, virtual_queue(ve));
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
+ tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
unlock:
spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);
err = -ENOMEM;
goto err_put;
}
+ ve->base.sched_engine->private_data = &ve->base;
ve->base.cops = &virtual_context_ops;
ve->base.request_alloc = execlists_request_alloc;
ve->base.bond_execute = virtual_bond_execute;
INIT_LIST_HEAD(virtual_queue(ve));
- tasklet_setup(&ve->base.execlists.tasklet, virtual_submission_tasklet);
+ tasklet_setup(&ve->base.sched_engine->tasklet, virtual_submission_tasklet);
intel_context_init(&ve->context, &ve->base);
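
Note the ordering in the virtual-engine path: private_data is pointed back at the engine before tasklet_setup(), so the callback can cast it safely from its very first run. A condensed view of the pairing (no new i915 API, just the two hunks above side by side):

	static void example_virtual_init(struct virtual_engine *ve)
	{
		/* Back-pointer first: virtual_submission_tasklet() casts it. */
		ve->base.sched_engine->private_data = &ve->base;
		tasklet_setup(&ve->base.sched_engine->tasklet,
			      virtual_submission_tasklet);
	}
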
* layering if we handle cloning of the requests and
* submitting a copy into each backend.
*/
- if (sibling->execlists.tasklet.callback !=
+ if (sibling->sched_engine->tasklet.callback !=
execlists_submission_tasklet) {
err = -ENODEV;
goto err_put;
static void guc_submission_tasklet(struct tasklet_struct *t)
{
- struct intel_engine_cs * const engine =
- from_tasklet(engine, t, execlists.tasklet);
+ struct i915_sched_engine *sched_engine =
+ from_tasklet(sched_engine, t, tasklet);
+ struct intel_engine_cs * const engine = sched_engine->private_data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port, *rq;
unsigned long flags;
{
if (iir & GT_RENDER_USER_INTERRUPT) {
intel_engine_signal_breadcrumbs(engine);
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
}
}
static void guc_reset_prepare(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
ENGINE_TRACE(engine, "\n");
/*
- * Turning off the execlists->tasklet until the reset is over
+ * Turning off the sched_engine->tasklet until the reset is over
* prevents the race.
*/
- __tasklet_disable_sync_once(&execlists->tasklet);
+ __tasklet_disable_sync_once(&engine->sched_engine->tasklet);
}
static void guc_reset_state(struct intel_context *ce,
static void guc_reset_finish(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- if (__tasklet_enable(&execlists->tasklet))
+ if (__tasklet_enable(&engine->sched_engine->tasklet))
/* And kick in case we missed a new request submission. */
- tasklet_hi_schedule(&execlists->tasklet);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
ENGINE_TRACE(engine, "depth->%d\n",
- atomic_read(&execlists->tasklet.count));
+ atomic_read(&engine->sched_engine->tasklet.count));
}
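
Both the execlists and GuC reset_finish paths kick only when __tasklet_enable() returns true. The usage suggests the i915 helper reports whether the disable count dropped back to zero, so nested prepare/finish pairs kick exactly once, on the outermost enable; a sketch of that assumed contract:

	static bool example_tasklet_enable(struct tasklet_struct *t)
	{
		/* True only when this balances the outermost disable. */
		return atomic_dec_and_test(&t->count);
	}
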
/*
GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
GEM_BUG_ON(list_empty(&rq->sched.link));
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&engine->sched_engine->tasklet);
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
{
engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
- tasklet_kill(&engine->execlists.tasklet);
+ tasklet_kill(&engine->sched_engine->tasklet);
intel_engine_cleanup_common(engine);
lrc_fini_wa_ctx(engine);
*/
GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
- tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
+ tasklet_setup(&engine->sched_engine->tasklet, guc_submission_tasklet);
guc_default_vfuncs(engine);
guc_default_irqs(engine);