// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/resume_user_mode.h>
#include <linux/highmem.h>
#include <linux/jump_label.h>
#include <linux/kmsan.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	arch_enter_from_user_mode(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(__ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	instrumentation_end();
}

void noinstr enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

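/*
 * Hand the syscall number and the first four syscall arguments to the
 * audit subsystem, but only when an audit context is active.
 */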
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

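/*
 * Syscall entry slow path. The ordering matters: Syscall User Dispatch
 * may divert the syscall entirely, ptrace can change the syscall number,
 * and seccomp runs after ptrace so it observes any tracer changes. The
 * tracepoint and audit see the final syscall number.
 */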
static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch. This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = ptrace_report_syscall_entry(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Either of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}

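/*
 * Common syscall entry work. Returns the possibly rewritten syscall
 * number, or -1 when the syscall is to be skipped.
 */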
static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	__enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}

/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

void noinstr exit_to_user_mode(void)
{
	__exit_to_user_mode();
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }

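/*
 * Process pending TIF work with interrupts enabled and loop with
 * interrupts disabled until no work is left.
 */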
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			arch_do_signal_or_restart(regs);

		if (ti_work & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();

		/* Check if any of the above work has queued a deferred wakeup */
		tick_nohz_user_enter_prepare();

		ti_work = read_thread_flags();
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

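/*
 * Last preparation step before returning to user space. Runs with
 * interrupts disabled and hands the final work state to the
 * architecture specific hooks.
 */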
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work;

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	ti_work = read_thread_flags();
	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (work & SYSCALL_WORK_SYSCALL_EMU)
		return false;

	return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}

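/*
 * One-time syscall exit work: audit, the exit tracepoint and the
 * ptrace syscall exit / single step report.
 */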
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked for the same reason as
	 * the entry side was not invoked in syscall_trace_enter(): The ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		ptrace_report_syscall_exit(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}

static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
}

void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	__syscall_exit_to_user_mode_work(regs);
}

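/*
 * All instrumentable exit work must be complete before
 * __exit_to_user_mode() switches lockdep, tracing and context tracking
 * over to user mode state.
 */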
__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_exit_to_user_mode_work(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

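/*
 * Establish the common entry state for interrupts and exceptions. For
 * entries from user mode this is the plain enter_from_user_mode()
 * sequence; for kernel mode entries the RCU/context tracking handling
 * depends on whether the idle task was interrupted.
 */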
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task invoke ct_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking ct_irq_enter(). If that nested interrupt is
	 * the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke ct_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		instrumentation_begin();
		kmsan_unpoison_entry_regs(regs);
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}

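/*
 * Preemption point when returning from interrupt to a preemptible
 * kernel context: reschedule only when the preempt count is zero and a
 * reschedule is pending.
 */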
void raw_irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}
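/*
 * With dynamic preemption the reschedule call above is routed through
 * either a static call or a static key, depending on which mechanism
 * the architecture provides.
 */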
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
		return;
	raw_irqentry_exit_cond_resched();
}
#endif
#endif

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			instrumentation_end();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();

		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			ct_irq_exit();
	}
}

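/*
 * NMI entry/exit bookkeeping: the lockdep hardirq state is captured on
 * entry and only restored on exit when hardirqs were enabled before the
 * NMI hit.
 */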
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}
	instrumentation_end();

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}