#ifdef CONFIG_TASKS_TRACE_RCU
-void rcu_read_unlock_trace_special(struct task_struct *t);
+void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
/**
* rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
*/
static inline void rcu_read_lock_trace(void)
{
struct task_struct *t = current;
WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
+ if (t->trc_reader_special.b.need_mb)
+ smp_mb(); // Pairs with update-side barriers
rcu_lock_acquire(&rcu_trace_lock_map);
}
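The .b.need_mb bit read above, together with the .s word and .b.need_qs bit used in the unlock path below, lives in the per-task ->trc_reader_special field. A minimal sketch of the assumed layout, modeled on union rcu_special from include/linux/sched.h (the sibling bit fields other than need_qs and need_mb are illustrative):

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;	// Reader must report a quiescent state at unlock.
		u8 exp_hint;
		u8 need_mb;	// Reader must execute smp_mb() for ordering.
	} b;			// Individual bits, updated with WRITE_ONCE().
	u32 s;			// Whole word, for a single "anything pending?" load.
};

// Relevant task_struct fields under CONFIG_TASKS_TRACE_RCU (sketch):
//	int trc_reader_nesting;			// Read-side nesting depth.
//	union rcu_special trc_reader_special;	// Deferred-action bits above.

Checking the whole word through ->trc_reader_special.s lets the unlock fast path decide "no special handling needed" with a single load instead of testing each bit separately.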
static inline void rcu_read_unlock_trace(void)
{
int nesting;
struct task_struct *t = current;

rcu_lock_release(&rcu_trace_lock_map);
nesting = READ_ONCE(t->trc_reader_nesting) - 1;
- WRITE_ONCE(t->trc_reader_nesting, nesting);
- if (likely(!READ_ONCE(t->trc_reader_need_end)) || nesting)
+ if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
+ WRITE_ONCE(t->trc_reader_nesting, nesting);
return; // We assume shallow reader nesting.
- rcu_read_unlock_trace_special(t);
+ }
+ rcu_read_unlock_trace_special(t, nesting);
}
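Taken together, the pair is used like any other RCU read-side marker. A hypothetical usage sketch (gp, struct foo, and do_something() are illustrative names, not part of this patch):

struct foo *p;

rcu_read_lock_trace();
p = rcu_dereference_raw(gp);	// gp: pointer published with rcu_assign_pointer().
if (p)
	do_something(p);	// p cannot be freed by a tasks-trace callback here.
rcu_read_unlock_trace();

Because the reader state is kept in the task structure rather than per-CPU, the critical section remains in force even if the task is preempted between lock and unlock.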
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
"RCU Tasks Trace");
/* If we are the last reader, wake up the grace-period kthread. */
-void rcu_read_unlock_trace_special(struct task_struct *t)
+void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
{
- WRITE_ONCE(t->trc_reader_need_end, false);
- if (atomic_dec_and_test(&trc_n_readers_need_end))
+ int nq = t->trc_reader_special.b.need_qs;
+
+ if (t->trc_reader_special.b.need_mb)
+ smp_mb(); // Pairs with update-side barriers.
+ // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
+ if (nq)
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
+ WRITE_ONCE(t->trc_reader_nesting, nesting);
+ if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
wake_up(&trc_wait);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
// Get here if the task is in a read-side critical section. Set
// its state so that it will awaken the grace-period kthread upon
// exit from that critical section.
- WARN_ON_ONCE(t->trc_reader_need_end);
- WRITE_ONCE(t->trc_reader_need_end, true);
+ WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
reset_ipi:
// Allow future IPIs to be sent on CPU and for task.
if (unlikely(t->trc_reader_nesting)) {
atomic_inc(&trc_n_readers_need_end); // One more to wait on.
- WARN_ON_ONCE(t->trc_reader_need_end);
- WRITE_ONCE(t->trc_reader_need_end, true);
+ WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
}
return true;
}
static void rcu_tasks_trace_pertask(struct task_struct *t,
struct list_head *hop)
{
- WRITE_ONCE(t->trc_reader_need_end, false);
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
WRITE_ONCE(t->trc_reader_checked, false);
t->trc_ipi_to_cpu = -1;
trc_wait_for_one_reader(t, hop);
".i"[is_idle_task(t)],
".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
t->trc_reader_nesting,
- " N"[!!t->trc_reader_need_end],
+ " N"[!!t->trc_reader_special.b.need_qs],
cpu);
sched_show_task(t);
}
break; // Count reached zero.
// Stall warning time, so make a list of the offenders.
for_each_process_thread(g, t)
- if (READ_ONCE(t->trc_reader_need_end))
+ if (READ_ONCE(t->trc_reader_special.b.need_qs))
trc_add_holdout(t, &holdouts);
firstreport = true;
list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
- if (READ_ONCE(t->trc_reader_need_end)) {
+ if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
show_stalled_task_trace(t, &firstreport);
trc_del_holdout(t);
}
WRITE_ONCE(t->trc_reader_checked, true);
WARN_ON_ONCE(t->trc_reader_nesting);
WRITE_ONCE(t->trc_reader_nesting, 0);
- if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_need_end)))
- rcu_read_unlock_trace_special(t);
+ if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
+ rcu_read_unlock_trace_special(t, 0);
}
/**