ftrace: Protect ftrace_graph_hash with ftrace_sync
Author:     Steven Rostedt (VMware) <rostedt@goodmis.org>
AuthorDate: Wed, 5 Feb 2020 14:20:32 +0000 (09:20 -0500)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Tue, 11 Feb 2020 12:35:28 +0000 (04:35 -0800)
[ Upstream commit 54a16ff6f2e50775145b210bcd94d62c3c2af117 ]

As the function_graph tracer can run when RCU is not "watching", it can not be
protected by synchronize_rcu(); it requires running a task on each CPU before
the old hash can be freed. schedule_on_each_cpu(ftrace_sync) needs to be used
for that.
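
The fix relies on a dummy work item as the synchronization barrier. Below is a
minimal sketch of that idea; it mirrors the kernel's ftrace_sync() callback in
kernel/trace/ftrace.c, but it is an illustration rather than the exact kernel
code:

    /*
     * The callback does nothing.  Its only job is to be run once on
     * every CPU by schedule_on_each_cpu(): when that call returns,
     * every CPU has gone through the scheduler, so no CPU can still
     * be inside a preempt_disable() section that started before the
     * call.  That is the guarantee needed before freeing the old
     * hash, even for readers that run while RCU is not "watching".
     */
    static void ftrace_sync(struct work_struct *work)
    {
            /* intentionally empty */
    }

Once schedule_on_each_cpu(ftrace_sync) returns, freeing the old hash with
free_ftrace_hash(old_hash) is safe.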

Link: https://lore.kernel.org/r/20200205131110.GT2935@paulmck-ThinkPad-P72
Cc: stable@vger.kernel.org
Fixes: b9b0c831bed26 ("ftrace: Convert graph filter to use hash tables")
Reported-by: "Paul E. McKenney" <paulmck@kernel.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/trace/ftrace.c
kernel/trace/trace.h

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d297a8b..407d8bf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5378,8 +5378,15 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&graph_lock);
 
-               /* Wait till all users are no longer using the old hash */
-               synchronize_rcu();
+               /*
+                * We need to do a hard force of sched synchronization.
+                * This is because we use preempt_disable() to do RCU, but
+                * the function tracers can be called where RCU is not watching
+                * (like before user_exit()). We can not rely on the RCU
+                * infrastructure to do the synchronization, thus we must do it
+                * ourselves.
+                */
+               schedule_on_each_cpu(ftrace_sync);
 
                free_ftrace_hash(old_hash);
        }
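
For context, the update side follows the usual publish, synchronize, then free
sequence. The helper below is hypothetical (the real logic lives inline in
ftrace_graph_release() and its callers), but it shows the ordering, assuming
graph_lock serializes writers:

    static void graph_hash_replace(struct ftrace_hash __rcu **slot,
                                   struct ftrace_hash *new_hash)
    {
            struct ftrace_hash *old_hash;

            mutex_lock(&graph_lock);
            old_hash = rcu_dereference_protected(*slot,
                                    lockdep_is_held(&graph_lock));
            rcu_assign_pointer(*slot, new_hash);    /* publish */
            mutex_unlock(&graph_lock);

            /* wait until no CPU can still be using old_hash */
            schedule_on_each_cpu(ftrace_sync);
            free_ftrace_hash(old_hash);
    }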
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 0864772..a3c29d5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -947,6 +947,7 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
         * Have to open code "rcu_dereference_sched()" because the
         * function graph tracer can be called when RCU is not
         * "watching".
+        * Protected with schedule_on_each_cpu(ftrace_sync)
         */
        hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
 
@@ -999,6 +1000,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
         * Have to open code "rcu_dereference_sched()" because the
         * function graph tracer can be called when RCU is not
         * "watching".
+        * Protected with schedule_on_each_cpu(ftrace_sync)
         */
        notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
                                                 !preemptible());
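
On the read side, the tracer relies on preemption being disabled rather than on
rcu_read_lock_sched(), which is why the hash is fetched with
rcu_dereference_protected(..., !preemptible()) instead of
rcu_dereference_sched(). A hypothetical reader sketch (hash_contains() is
illustrative; the real checks live in ftrace_graph_addr() and
ftrace_graph_notrace_addr()):

    static bool hash_contains(unsigned long addr)
    {
            struct ftrace_hash *hash;
            bool ret;

            preempt_disable_notrace();
            /* Safe: a writer waits for every CPU to schedule before freeing */
            hash = rcu_dereference_protected(ftrace_graph_hash,
                                             !preemptible());
            ret = ftrace_lookup_ip(hash, addr) != NULL;
            preempt_enable_notrace();

            return ret;
    }

The !preemptible() check documents the reader's requirement: as long as
preemption stays disabled, schedule_on_each_cpu(ftrace_sync) on the writer side
cannot complete, so the hash being read cannot be freed underneath it.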