static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
{
	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
-	struct i915_request *next = READ_ONCE(ve->request);
-	if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
+	if (READ_ONCE(ve->request))
		tasklet_hi_schedule(&ve->base.execlists.tasklet);
}
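The hunk above simplifies kick_siblings(): instead of rescheduling the virtual tasklet only when the next queued request is rq itself, or can run on an engine outside rq's execution_mask, any pending ve->request now re-runs the tasklet and the placement decision is repeated there. A stand-alone sketch of the mask test being dropped (illustrative only, not part of the patch; the helper name is hypothetical and the masks are simplified to u32):

	#include <linux/types.h>

	/* One bit per physical engine; "next" was only worth kicking if it
	 * could be placed somewhere outside the completing request's
	 * execution mask. */
	static bool old_kick_filter(u32 next_mask, u32 completed_mask)
	{
		return next_mask & ~completed_mask;
	}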
		struct i915_request *rq = READ_ONCE(ve->request);
		/* lazily cleanup after another engine handled rq */
-		if (!rq) {
+		if (!rq || !virtual_matches(ve, rq, engine)) {
			rb_erase_cached(rb, &el->virtual);
			RB_CLEAR_NODE(rb);
			rb = rb_first_cached(&el->virtual);
			continue;
		}
-		if (!virtual_matches(ve, rq, engine)) {
-			rb = rb_next(rb);
-			continue;
-		}
		return ve;
	}
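With the change above, a node whose request fails virtual_matches() for this engine is handled by the same lazy-cleanup branch as a node with no request at all: it is erased from the engine's tree and the walk restarts from the leftmost node via rb_first_cached(), rather than stepping to rb_next(). A driver-independent sketch of that erase-and-restart walk over an rb_root_cached (illustrative only; the item type and the "stale" predicate are made up for this example):

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct item {
		struct rb_node rb;	/* linked into an rb_root_cached */
		bool stale;		/* stand-in for "cannot be used here" */
	};

	static struct item *first_usable(struct rb_root_cached *root)
	{
		struct rb_node *rb = rb_first_cached(root);

		while (rb) {
			struct item *it = rb_entry(rb, struct item, rb);

			if (it->stale) {
				/* Drop the node and restart from the new
				 * leftmost entry, as in the hunk above. */
				rb_erase_cached(rb, root);
				RB_CLEAR_NODE(rb);
				rb = rb_first_cached(root);
				continue;
			}

			return it;
		}

		return NULL;
	}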
	if (unlikely(!mask))
		return;
-	local_irq_disable();
	for (n = 0; n < ve->num_siblings; n++) {
		struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
		struct ve_node * const node = &ve->nodes[sibling->id];
		if (!READ_ONCE(ve->request))
			break; /* already handled by a sibling's tasklet */
+		spin_lock_irq(&sibling->active.lock);
+
		if (unlikely(!(mask & sibling->mask))) {
			if (!RB_EMPTY_NODE(&node->rb)) {
-				spin_lock(&sibling->active.lock);
				rb_erase_cached(&node->rb,
						&sibling->execlists.virtual);
				RB_CLEAR_NODE(&node->rb);
-				spin_unlock(&sibling->active.lock);
			}
-			continue;
-		}
-		spin_lock(&sibling->active.lock);
+			goto unlock_engine;
+		}
-		if (!RB_EMPTY_NODE(&node->rb)) {
+		if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
			/*
			 * Cheat and avoid rebalancing the tree if we can
			 * reuse this node in situ.
		if (first && prio > sibling->execlists.queue_priority_hint)
			tasklet_hi_schedule(&sibling->execlists.tasklet);
-		spin_unlock(&sibling->active.lock);
+unlock_engine:
+		spin_unlock_irq(&sibling->active.lock);
+
+		if (intel_context_inflight(&ve->context))
+			break;
	}
-	local_irq_enable();
}
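The remaining hunks rework the locking of the sibling walk: rather than disabling interrupts once around the whole loop and taking each sibling->active.lock with a bare spin_lock(), each iteration now uses spin_lock_irq()/spin_unlock_irq(), both exit paths funnel through the new unlock_engine label, and the walk stops early once intel_context_inflight(&ve->context) reports the virtual context already resident on an engine. A minimal sketch contrasting the two irq/lock idioms (illustrative only, not driver code; the lock array and the per-engine work are placeholders):

	#include <linux/spinlock.h>

	/* Pattern being removed: one irq-off window spanning every lock. */
	static void walk_irq_off_throughout(spinlock_t *locks, int n)
	{
		int i;

		local_irq_disable();
		for (i = 0; i < n; i++) {
			spin_lock(&locks[i]);
			/* per-engine queue manipulation */
			spin_unlock(&locks[i]);
		}
		local_irq_enable();
	}

	/* Pattern being introduced: interrupts are off only while each
	 * per-engine lock is actually held. */
	static void walk_irq_off_per_lock(spinlock_t *locks, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			spin_lock_irq(&locks[i]);
			/* per-engine queue manipulation */
			spin_unlock_irq(&locks[i]);
		}
	}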
static void virtual_submit_request(struct i915_request *rq)