seq_printf(m, "Current sequence (%s): %x\n",
engine->name, intel_engine_get_seqno(engine));
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
}
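The debugfs walkers above now take b->rb_lock instead of the old unified b->lock: the waiter rbtree and the irq bookkeeping are being split under two separate locks. A simplified view of the structure after this patch (only the fields named in these hunks; every other member of struct intel_breadcrumbs elided):

struct intel_breadcrumbs {
        spinlock_t irq_lock;            /* guards irq_wait, irq_armed,
                                         * irq_enabled; safe in irq context */
        struct intel_wait *irq_wait;    /* oldest waiter, owns the interrupt */

        spinlock_t rb_lock;             /* guards the rbtrees; always taken
                                         * outside irq_lock, never from irq */
        struct rb_root waiters;         /* sleeping waiters, sorted by seqno */
        struct rb_root signals;         /* requests awaiting fence signaling */
};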
static int i915_gem_seqno_info(struct seq_file *m, void *data)
&dev_priv->gpu_error.missed_irq_rings)),
yesno(engine->hangcheck.stalled));
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
}
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
seq_puts(m, "\n");
}
struct intel_wait *wait;
unsigned int result = 0;
- wait = b->first_wait;
+ lockdep_assert_held(&b->irq_lock);
+
+ wait = b->irq_wait;
if (wait) {
result = ENGINE_WAKEUP_WAITER;
if (wake_up_process(wait->tsk))
unsigned long flags;
unsigned int result;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irqsave(&b->irq_lock, flags);
result = __intel_breadcrumbs_wakeup(b);
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
return result;
}
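Assembled from the truncated fragments above, the wakeup helper needs only irq_lock, since it touches nothing but irq_wait; the ENGINE_WAKEUP_ASLEEP flag in the wake_up_process() branch is an assumption filled in from the surrounding kernel sources:

static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
        struct intel_wait *wait;
        unsigned int result = 0;

        lockdep_assert_held(&b->irq_lock);

        wait = b->irq_wait;
        if (wait) {
                result = ENGINE_WAKEUP_WAITER;
                if (wake_up_process(wait->tsk))         /* true if it slept */
                        result |= ENGINE_WAKEUP_ASLEEP; /* assumed flag */
        }

        return result;
}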
* coherent seqno check.
*/
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irqsave(&b->irq_lock, flags);
if (!__intel_breadcrumbs_wakeup(b))
__intel_engine_disarm_breadcrumbs(engine);
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
if (!b->irq_armed)
return;
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- lockdep_assert_held(&b->lock);
+ lockdep_assert_held(&b->irq_lock);
if (b->irq_enabled) {
irq_disable(engine);
if (!b->irq_armed)
return;
- spin_lock_irqsave(&b->lock, flags);
+ spin_lock_irqsave(&b->irq_lock, flags);
/* We only disarm the irq when we are idle (all requests completed),
* so if there remains a sleeping waiter, it missed the request
__intel_engine_disarm_breadcrumbs(engine);
- spin_unlock_irqrestore(&b->lock, flags);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
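The disarm path likewise asserts irq_lock only. Filling in the context this excerpt elides (the two state-clearing assignments are assumptions about what the omitted lines do):

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        lockdep_assert_held(&b->irq_lock);

        if (b->irq_enabled) {
                irq_disable(engine);
                b->irq_enabled = false; /* assumed from elided context */
        }

        b->irq_armed = false;           /* assumed from elided context */
}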
static bool use_fake_irq(const struct intel_breadcrumbs *b)
container_of(b, struct intel_engine_cs, breadcrumbs);
struct drm_i915_private *i915 = engine->i915;
- lockdep_assert_held(&b->lock);
+ lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
return;
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
struct intel_wait *wait)
{
- lockdep_assert_held(&b->lock);
+ lockdep_assert_held(&b->rb_lock);
/* This request is completed, so remove it from the tree, mark it as
* complete, and *then* wake up the associated task.
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ spin_lock(&b->irq_lock);
GEM_BUG_ON(!b->irq_armed);
- b->first_wait = to_wait(next);
+ b->irq_wait = to_wait(next);
+ spin_unlock(&b->irq_lock);
/* We always wake up the next waiter that takes over as the bottom-half
* as we may delegate not only the irq-seqno barrier to the next waiter
}
if (first) {
+ spin_lock(&b->irq_lock);
GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
- b->first_wait = wait;
+ b->irq_wait = wait;
/* After assigning ourselves as the new bottom-half, we must
* perform a cursory check to prevent a missed interrupt.
* Either we miss the interrupt whilst programming the hardware,
* and so we miss the wake up.
*/
__intel_breadcrumbs_enable_irq(b);
+ spin_unlock(&b->irq_lock);
}
- GEM_BUG_ON(!b->first_wait);
- GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
+ GEM_BUG_ON(!b->irq_wait);
+ GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
return first;
}
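This hunk fixes the lock nesting for the whole patch: rb_lock is the outer lock, taken with interrupts disabled, and irq_lock nests inside it with a plain spin_lock(). A minimal sketch of the pattern (illustration only, not the driver code):

static void example_lock_nesting(struct intel_breadcrumbs *b,
                                 struct intel_wait *wait)
{
        spin_lock_irq(&b->rb_lock);     /* owns the waiter rbtree */

        spin_lock(&b->irq_lock);        /* irqs already off: plain lock */
        b->irq_wait = wait;             /* hand over the irq bottom-half */
        spin_unlock(&b->irq_lock);

        spin_unlock_irq(&b->rb_lock);
}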
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool first;
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
first = __intel_engine_add_wait(engine, wait);
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
return first;
}
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- lockdep_assert_held(&b->lock);
+ lockdep_assert_held(&b->rb_lock);
if (RB_EMPTY_NODE(&wait->node))
goto out;
- if (b->first_wait == wait) {
+ if (b->irq_wait == wait) {
const int priority = wakeup_priority(b, wait->tsk);
struct rb_node *next;
rb_erase(&wait->node, &b->waiters);
out:
- GEM_BUG_ON(b->first_wait == wait);
+ GEM_BUG_ON(b->irq_wait == wait);
GEM_BUG_ON(rb_first(&b->waiters) !=
- (b->first_wait ? &b->first_wait->node : NULL));
+ (b->irq_wait ? &b->irq_wait->node : NULL));
}
void intel_engine_remove_wait(struct intel_engine_cs *engine,
if (RB_EMPTY_NODE(&wait->node))
return;
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
__intel_engine_remove_wait(engine, wait);
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
}
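For orientation, this is how a waiter drives the add/remove API: a sketch condensed from the era's i915_wait_request(), with the irq-seqno barrier, error handling and timeouts all elided, so treat it as illustrative rather than the real wait loop:

static void example_wait_for_seqno(struct intel_engine_cs *engine, u32 seqno)
{
        struct intel_wait wait = { .tsk = current, .seqno = seqno };

        intel_engine_add_wait(engine, &wait);   /* takes b->rb_lock */

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
                        break;
                schedule();     /* woken by the user interrupt or fake irq */
        }
        __set_current_state(TASK_RUNNING);

        intel_engine_remove_wait(engine, &wait); /* takes b->rb_lock */
}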
static bool signal_valid(const struct drm_i915_gem_request *request)
dma_fence_signal(&request->fence);
local_bh_enable(); /* kick start the tasklets */
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
/* Wake up all other completed waiters and select the
* next bottom-half for the next user interrupt.
rb_erase(&request->signaling.node, &b->signals);
RB_CLEAR_NODE(&request->signaling.node);
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
i915_gem_request_put(request);
} else {
request->signaling.wait.seqno = seqno;
i915_gem_request_get(request);
- spin_lock(&b->lock);
+ spin_lock(&b->rb_lock);
/* First add ourselves into the list of waiters, but register our
* bottom-half as the signaller thread. As per usual, only the oldest
if (first)
rcu_assign_pointer(b->first_signal, request);
- spin_unlock(&b->lock);
+ spin_unlock(&b->rb_lock);
if (wakeup)
wake_up_process(b->signaler);
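Note the plain spin_lock(&b->rb_lock) in this hunk: signaling setup runs under request->lock with interrupts already disabled (see the lockdep assert below), so rb_lock simply nests beneath it. Schematically (bodies elided, illustration only):

static void example_enable_signaling(struct drm_i915_gem_request *request)
{
        struct intel_breadcrumbs *b = &request->engine->breadcrumbs;

        lockdep_assert_held(&request->lock);    /* irqs already disabled */

        spin_lock(&b->rb_lock);                 /* nests under request->lock */
        /* ... insert request->signaling.node into b->signals ... */
        spin_unlock(&b->rb_lock);
}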
lockdep_assert_held(&request->lock);
GEM_BUG_ON(!request->signaling.wait.seqno);
- spin_lock(&b->lock);
+ spin_lock(&b->rb_lock);
if (!RB_EMPTY_NODE(&request->signaling.node)) {
if (request == rcu_access_pointer(b->first_signal)) {
__intel_engine_remove_wait(engine, &request->signaling.wait);
- spin_unlock(&b->lock);
+ spin_unlock(&b->rb_lock);
request->signaling.wait.seqno = 0;
}
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct task_struct *tsk;
- spin_lock_init(&b->lock);
+ spin_lock_init(&b->rb_lock);
+ spin_lock_init(&b->irq_lock);
+
setup_timer(&b->fake_irq,
intel_breadcrumbs_fake_irq,
(unsigned long)engine);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
cancel_fake_irq(engine);
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->irq_lock);
if (b->irq_enabled)
irq_enable(engine);
if (b->irq_armed)
enable_fake_irq(b);
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->irq_lock);
}
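Reset touches only irq state, so irq_lock alone suffices. Assembled with the elided middle filled in; the irq_disable() else-branch is an assumption about the omitted context:

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        cancel_fake_irq(engine);

        spin_lock_irq(&b->irq_lock);

        if (b->irq_enabled)
                irq_enable(engine);
        else
                irq_disable(engine);    /* assumed from elided context */

        if (b->irq_armed)
                enable_fake_irq(b);

        spin_unlock_irq(&b->irq_lock);
}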
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* The engines should be idle and all requests accounted for! */
- WARN_ON(READ_ONCE(b->first_wait));
+ WARN_ON(READ_ONCE(b->irq_wait));
WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
WARN_ON(rcu_access_pointer(b->first_signal));
WARN_ON(!RB_EMPTY_ROOT(&b->signals));
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool busy = false;
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
- if (b->first_wait) {
- wake_up_process(b->first_wait->tsk);
+ if (b->irq_wait) {
+ wake_up_process(b->irq_wait->tsk);
busy |= intel_engine_flag(engine);
}
busy |= intel_engine_flag(engine);
}
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
return busy;
}