DEFINE_SRCU(tasks_rcu_exit_srcu);
/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
-static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 3;
+static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);
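The timeout is expressed in jiffies, so the change above moves the default stall warning from three minutes to ten, and any value of zero or below disables the warnings entirely. A minimal sketch of how a "<= 0 disables, otherwise jiffies until a complaint" knob like this is typically consumed (my own illustration, not code from the patch; the helper name is invented):

#include <linux/jiffies.h>
#include <linux/types.h>

/* Invented helper, not part of the patch: has a stall interval measured
 * from @start elapsed?  Honors the "<= 0 means disabled" convention of
 * rcu_task_stall_timeout. */
static bool stall_interval_elapsed(unsigned long start, int timeout)
{
	if (timeout <= 0)		/* stall warnings disabled */
		return false;
	return time_after(jiffies, start + timeout);
}

Because the parameter is registered with mode 0644 it can also be tuned at runtime through sysfs or on the kernel command line; in mainline it is exposed with the rcupdate. prefix, i.e. rcupdate.rcu_task_stall_timeout.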
/* Post an RCU-tasks callback. */
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
-/* See if the current task has stopped holding out, remove from list if so. */
-static void check_holdout_task(struct task_struct *t)
+/* See if tasks are still holding out, complain if so. */
+static void check_holdout_task(struct task_struct *t,
+			       bool needreport, bool *firstreport)
{
	if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
	    !ACCESS_ONCE(t->on_rq)) {
		ACCESS_ONCE(t->rcu_tasks_holdout) = false;
		list_del_rcu(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
+		return;
	}
+	if (!needreport)
+		return;
+	if (*firstreport) {
+		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
+		*firstreport = false;
+	}
+	sched_show_task(t);
}
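The needreport/firstreport pair implements a simple reporting idiom: the caller decides once per scan whether this pass is allowed to complain at all (needreport), and the first task that does complain also triggers the one-line banner, so a quiet scan prints nothing and a noisy scan prints the banner exactly once. A standalone userspace sketch of the same idiom (my own code; report_stalled() is a made-up stand-in for sched_show_task()):

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-in for sched_show_task(): dump one stalled item,
 * emitting the banner only before the first one in a scan. */
static void report_stalled(int id, bool needreport, bool *firstreport)
{
	if (!needreport)
		return;
	if (*firstreport) {
		printf("INFO: detected stalls on tasks:\n");
		*firstreport = false;
	}
	printf("  task %d is still holding out\n", id);
}

int main(void)
{
	int holdouts[] = { 12, 34, 56 };
	bool firstreport = true;	/* reset once per scan of the list */
	size_t i;

	for (i = 0; i < sizeof(holdouts) / sizeof(holdouts[0]); i++)
		report_stalled(holdouts[i], true, &firstreport);
	return 0;
}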
/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
+	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);
		 * of holdout tasks, removing any that are no longer
		 * holdouts. When the list is empty, we are done.
		 */
+		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
+			bool firstreport;
+			bool needreport;
+			int rtst;
+
			schedule_timeout_interruptible(HZ);
+			rtst = ACCESS_ONCE(rcu_task_stall_timeout);
+			needreport = rtst > 0 &&
+				     time_after(jiffies, lastreport + rtst);
+			if (needreport)
+				lastreport = jiffies;
+			firstreport = true;
			WARN_ON(signal_pending(current));
			rcu_read_lock();
			list_for_each_entry_rcu(t, &rcu_tasks_holdouts,
						rcu_tasks_holdout_list)
-				check_holdout_task(t);
+				check_holdout_task(t, needreport, &firstreport);
			rcu_read_unlock();
		}
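Tying the loop together: the kthread sleeps for a second per pass over the holdout list, and needreport only becomes true once more than rcu_task_stall_timeout jiffies have elapsed since the last report, at which point lastreport is reset so the next warning is again a full stall period away. A small userspace sketch of that rate limiting (my own illustration; plain tick arithmetic stands in for jiffies and the time_after() wraparound handling is omitted):

#include <stdbool.h>
#include <stdio.h>

/* Decide whether to emit a stall report on this pass, at most once per
 * 'timeout' ticks; a timeout <= 0 disables reporting entirely. */
static bool need_report(unsigned long now, unsigned long *lastreport, int timeout)
{
	if (timeout <= 0 || now - *lastreport <= (unsigned long)timeout)
		return false;
	*lastreport = now;	/* start a fresh stall interval */
	return true;
}

int main(void)
{
	unsigned long lastreport = 0;
	unsigned long tick;

	/* With a timeout of 3 ticks, reports fire at ticks 4 and 8. */
	for (tick = 1; tick <= 10; tick++)
		if (need_report(tick, &lastreport, 3))
			printf("stall report at tick %lu\n", tick);
	return 0;
}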