/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>

DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
	.active = true,
#endif
};

/**
 * user_enter - Inform the context tracking that the CPU is going to
 *              enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it's guaranteed the remaining kernel instructions
 * to execute won't use any RCU read side critical section because this
 * function sets RCU in extended quiescent state.
 */
void user_enter(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.active) &&
	    __this_cpu_read(context_tracking.state) != IN_USER) {
		/*
		 * At this stage, only low level arch entry code remains and
		 * then we'll run in userspace. We can assume there won't be
		 * any RCU read-side critical section until the next call to
		 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
		 * on the tick.
		 */
		vtime_user_enter(current);
		rcu_user_enter();
		__this_cpu_write(context_tracking.state, IN_USER);
	}
	local_irq_restore(flags);
}
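
/*
 * Usage sketch (illustrative, not part of this file): an architecture's
 * low level entry code is expected to call user_enter() as the last high
 * level step before returning to userspace, symmetric to the user_exit()
 * call on the way in. The hook and helper names below are hypothetical
 * stand-ins for arch specific code:
 *
 *	asmlinkage void arch_prepare_return_to_user(struct pt_regs *regs)
 *	{
 *		do_pending_resume_work(regs);	(signals, notifiers, etc...)
 *		user_enter();
 *	}
 */
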
#ifdef CONFIG_PREEMPT
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and to avoid tracing the preempt enabling caused by the
 * tracing infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called while the system is still in usermode.
 *
 * To prevent this, preempt_enable_notrace uses this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
void __sched notrace preempt_schedule_context(void)
{
	enum ctx_state prev_ctx;

	if (likely(!preemptible()))
		return;

	/*
	 * Need to disable preemption in case user_exit() is traced
	 * and the tracer calls preempt_enable_notrace() causing
	 * an infinite recursion.
	 */
	preempt_disable_notrace();
	prev_ctx = exception_enter();
	preempt_enable_no_resched_notrace();

	preempt_schedule();

	preempt_disable_notrace();
	exception_exit(prev_ctx);
	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_PREEMPT */
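
/*
 * Usage sketch (illustrative, not part of this file): the exception_enter()/
 * exception_exit() pair used above is the generic pattern for any handler
 * that can fire on top of either user or kernel context. A hypothetical
 * fault handler would wrap its body like this:
 *
 *	void do_example_fault(struct pt_regs *regs)
 *	{
 *		enum ctx_state prev_state = exception_enter();
 *
 *		handle_example_fault(regs);	(may use RCU safely here)
 *		exception_exit(prev_state);
 *	}
 */
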
/**
 * user_exit - Inform the context tracking that the CPU is
 *             exiting userspace mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from userspace
 * before any use of RCU read side critical section. This potentially includes
 * any high level kernel code like syscalls, exceptions, signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void user_exit(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == IN_USER) {
		/*
		 * We are going to run code that may use RCU. Inform
		 * RCU core about that (i.e. we may need the tick again).
		 */
		rcu_user_exit();
		vtime_user_exit(current);
		__this_cpu_write(context_tracking.state, IN_KERNEL);
	}
	local_irq_restore(flags);
}
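
/*
 * Usage sketch (illustrative, not part of this file): arch syscall slow path
 * code calls user_exit() before doing anything that may rely on RCU. The
 * entry helper below is a hypothetical stand-in:
 *
 *	asmlinkage void arch_syscall_slowpath_enter(struct pt_regs *regs)
 *	{
 *		user_exit();		(RCU is watching again from here on)
 *		audit_and_trace_syscall(regs);
 *	}
 */
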
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
void guest_enter(void)
{
	if (vtime_accounting_enabled())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
}
EXPORT_SYMBOL_GPL(guest_enter);

void guest_exit(void)
{
	if (vtime_accounting_enabled())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}
EXPORT_SYMBOL_GPL(guest_exit);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
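
/*
 * Usage sketch (illustrative, not part of this file): a hypervisor such as
 * KVM brackets actual guest execution with these calls so that guest CPU
 * time is accounted to the right place. A simplified, hypothetical vcpu
 * run step, where arch_enter_guest_mode() stands in for the real arch hook:
 *
 *	static int vcpu_run_once(struct kvm_vcpu *vcpu)
 *	{
 *		int ret;
 *
 *		guest_enter();
 *		ret = arch_enter_guest_mode(vcpu);
 *		guest_exit();
 *
 *		return ret;
 *	}
 */
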
/**
 * context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * The context tracking uses the syscall slow path to implement its user-kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do the context tracking. As such the TIF
 * flag may not be desired there.
 */
void context_tracking_task_switch(struct task_struct *prev,
				  struct task_struct *next)
{
	if (__this_cpu_read(context_tracking.active)) {
		clear_tsk_thread_flag(prev, TIF_NOHZ);
		set_tsk_thread_flag(next, TIF_NOHZ);
	}
}
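
/*
 * Caller sketch (simplified from the shape of context_switch() in
 * kernel/sched/core.c, details elided): the scheduler core invokes this
 * hook while switching tasks so the TIF_NOHZ flag follows the task that
 * is about to run:
 *
 *	static inline void
 *	context_switch(struct rq *rq, struct task_struct *prev,
 *		       struct task_struct *next)
 *	{
 *		...
 *		context_tracking_task_switch(prev, next);
 *		switch_to(prev, next, prev);
 *		...
 *	}
 */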