// SPDX-License-Identifier: GPL-2.0-only
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#ifdef CONFIG_XEN_PV
#include <xen/xen-ops.h>
#include <xen/events.h>
#endif

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/syscall.h>
#include <asm/irq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* Check that the stack and regs on entry from user mode are sane. */
static void check_user_regs(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
		WARN_ON_ONCE(!on_thread_stack());
		WARN_ON_ONCE(regs != task_pt_regs(current));
	}
}

#ifdef CONFIG_CONTEXT_TRACKING
/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall entry disables interrupts, but user mode is traced as interrupts
 * enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 */
static noinstr void enter_from_user_mode(void)
{
	enum ctx_state state = ct_state();

	lockdep_hardirqs_off(CALLER_ADDR0);
	user_exit_irqoff();

	instrumentation_begin();
	CT_WARN_ON(state != CONTEXT_USER);
	trace_hardirqs_off_finish();
	instrumentation_end();
}
#else
static __always_inline void enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}
#endif

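/*
 * Both variants establish the same lockdep and tracing state; only the
 * context tracking/RCU work differs. Every C level entry point below
 * (do_syscall_64(), do_int80_syscall_32(), the idtentry helpers) invokes
 * enter_from_user_mode() before doing any instrumentable work.
 */
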
/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall exit enables interrupts, but the kernel state is interrupts
 * disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Clear CPU buffers if CPU is affected by MDS and the mitigation is on.
 * 4) Tell lockdep that interrupts are enabled
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	mds_user_clear_cpu_buffers();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

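/*
 * The ordering here is the mirror image of enter_from_user_mode():
 * tracing and lockdep preparation run first inside the instrumentable
 * section, then context tracking and CPU buffer clearing, and lockdep
 * is told about the interrupt state last so it stays the outermost
 * layer on both the entry and the exit path.
 */
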
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

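/*
 * For reference: the 64-bit ABI passes syscall arguments in di, si, dx,
 * r10, r8, r9; the 32-bit ABI uses bx, cx, dx, si, di, bp.
 * audit_syscall_entry() records only the first four arguments, which is
 * why the calls above stop at r10 and si respectively.
 */
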
/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	u32 work;

	work = READ_ONCE(ti->flags);

	if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
		ret = tracehook_report_syscall_entry(regs);
		if (ret || (work & _TIF_SYSCALL_EMU))
			return -1L;
	}

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

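/*
 * Note the GNU "x ?: y" shorthand in the final return: it yields ret
 * when ret is non-zero and regs->orig_ax otherwise. All non-zero
 * results (-1 to skip the syscall) are returned early above, so the
 * common path returns the (possibly ptrace-modified) syscall number.
 */
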
#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

static void __prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

	/* Reload ti->flags; we may have rescheduled above. */
	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & _TIF_IO_BITMAP))
		tss_update_io_bitmap();

	fpregs_assert_state_consistent();
	if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
#endif
}

__visible noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
{
	instrumentation_begin();
	__prepare_exit_to_usermode(regs);
	instrumentation_end();
	exit_to_user_mode();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

static void __syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	rseq_syscall(regs);

	/*
	 * First do one-time work. If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	__prepare_exit_to_usermode(regs);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible noinstr void syscall_return_slowpath(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_return_slowpath(regs);
	instrumentation_end();
	exit_to_user_mode();
}

#ifdef CONFIG_X86_64
__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	check_user_regs(regs);

	enter_from_user_mode();
	instrumentation_begin();

	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
#ifdef CONFIG_X86_X32_ABI
	} else if (likely((nr & __X32_SYSCALL_BIT) &&
			  (nr & ~__X32_SYSCALL_BIT) < X32_NR_syscalls)) {
		nr = array_index_nospec(nr & ~__X32_SYSCALL_BIT,
					X32_NR_syscalls);
		regs->ax = x32_sys_call_table[nr](regs);
#endif
	}
	__syscall_return_slowpath(regs);

	instrumentation_end();
	exit_to_user_mode();
}
#endif

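/*
 * Worked example of the x32 dispatch above (illustrative; assumes the
 * usual __X32_SYSCALL_BIT value of BIT(30) and that write is nr 1 in
 * the common table): an x32 write(2) arrives as nr == 0x40000001,
 * passes the range check since (nr & ~__X32_SYSCALL_BIT) == 1, and is
 * dispatched via x32_sys_call_table[1] after the nospec clamp.
 */
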
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it. This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
		regs->ax = ia32_sys_call_table[nr](regs);
	}

	__syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
	check_user_regs(regs);

	enter_from_user_mode();
	instrumentation_begin();

	local_irq_enable();
	do_syscall_32_irqs_on(regs);

	instrumentation_end();
	exit_to_user_mode();
}

static bool __do_fast_syscall_32(struct pt_regs *regs)
{
	int res;

	/* Fetch EBP from where the vDSO stashed it. */
	if (IS_ENABLED(CONFIG_X86_64)) {
		/*
		 * Micro-optimization: the pointer we're following is
		 * explicitly 32 bits, so it can't be out of range.
		 */
		res = __get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp);
	} else {
		res = get_user(*(u32 *)&regs->bp,
		       (u32 __user __force *)(unsigned long)(u32)regs->sp);
	}

	if (res) {
		/* User code screwed up. */
		regs->ax = -EFAULT;
		local_irq_disable();
		__prepare_exit_to_usermode(regs);
		return false;
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);
	return true;
}

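/*
 * Why EBP comes from the user stack (background sketch, from the vDSO
 * side of the convention): the 32-bit fast-syscall entry clobbers EBP,
 * so __kernel_vsyscall saves the sixth syscall argument on the user
 * stack before SYSENTER/SYSCALL32. The reads above reload it into
 * regs->bp so 32-bit syscalls see all six arguments in the usual
 * registers.
 */
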
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
					vdso_image_32.sym_int80_landing_pad;
	bool success;

	check_user_regs(regs);

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();
	instrumentation_begin();

	local_irq_enable();
	success = __do_fast_syscall_32(regs);

	instrumentation_end();
	exit_to_user_mode();

	/* If it failed, keep it simple: use IRET. */
	if (!success)
		return 0;

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */

SYSCALL_DEFINE0(ni_syscall)
{
	return -ENOSYS;
}

/**
 * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
 *			     RCU handling
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
 * invoked on entry and rcu_irq_exit() on exit.
 *
 * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking rcu_irq_enter() without undoing it.
 *
 * For user mode entries enter_from_user_mode() must be invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: True if RCU has been adjusted on a kernel entry
 *	    False otherwise
 *
 * The return value must be fed into the rcu_exit argument of
 * idtentry_exit_cond_rcu().
 */
bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		enter_from_user_mode();
		return false;
	}

	/*
	 * If this entry hit the idle task invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for __rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		return true;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	/* Use the combo lockdep/tracing function */
	trace_hardirqs_off();
	instrumentation_end();

	return false;
}

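/*
 * Typical pairing, as a sketch (exc_example is a made-up handler name;
 * real handlers are generated by the DEFINE_IDTENTRY* macros):
 *
 *	__visible noinstr void exc_example(struct pt_regs *regs)
 *	{
 *		bool rcu_exit = idtentry_enter_cond_rcu(regs);
 *
 *		instrumentation_begin();
 *		handle_the_exception(regs);
 *		instrumentation_end();
 *
 *		idtentry_exit_cond_rcu(regs, rcu_exit);
 *	}
 */
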
static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
{
	if (may_sched && !preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
	/* Covers both tracing and lockdep */
	trace_hardirqs_on();
}

/**
 * idtentry_exit_cond_rcu - Handle return from exception with conditional RCU
 *			    handling
 * @regs:	Pointer to pt_regs (exception entry regs)
 * @rcu_exit:	Invoke rcu_irq_exit() if true
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to idtentry_enter_cond_rcu(). The return value of the entry
 * function must be fed into the @rcu_exit argument.
 */
void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		prepare_exit_to_usermode(regs);
	} else if (regs->flags & X86_EFLAGS_IF) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (rcu_exit) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		idtentry_exit_cond_resched(regs, IS_ENABLED(CONFIG_PREEMPTION));
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (rcu_exit)
			rcu_irq_exit();
	}
}

/**
 * idtentry_enter_user - Handle state tracking on idtentry from user mode
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes enter_from_user_mode() to establish the proper context for
 * NOHZ_FULL. Otherwise scheduling on exit would not be possible.
 */
void noinstr idtentry_enter_user(struct pt_regs *regs)
{
	enter_from_user_mode();
}

/**
 * idtentry_exit_user - Handle return from exception to user mode
 * @regs:	Pointer to pt_regs (exception entry regs)
 *
 * Runs the necessary preemption and work checks and returns to the caller
 * with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to idtentry_enter_user().
 */
void noinstr idtentry_exit_user(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	prepare_exit_to_usermode(regs);
}

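/*
 * Sketch of the pairing for handlers that can only be reached from user
 * mode (handle_it is a placeholder name):
 *
 *	idtentry_enter_user(regs);
 *	instrumentation_begin();
 *	handle_it(regs);
 *	instrumentation_end();
 *	idtentry_exit_user(regs);
 */
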
#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
 * Some hypercalls issued by the toolstack can take many tens of
 * seconds. Allow tasks running hypercalls via the privcmd driver to
 * be voluntarily preempted even if full kernel preemption is
 * disabled.
 *
 * Such preemptible hypercalls are bracketed by
 * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
 * calls.
 */
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);

/*
 * In case of scheduling the flag must be cleared and restored after
 * returning from schedule as the task might move to a different CPU.
 */
static __always_inline bool get_and_clear_inhcall(void)
{
	bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);

	__this_cpu_write(xen_in_preemptible_hcall, false);
	return inhcall;
}

static __always_inline void restore_inhcall(bool inhcall)
{
	__this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
#endif

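/*
 * Illustrative use of the bracketing described above (sketch; the real
 * call sites live in the privcmd driver, and HYPERVISOR_dm_op stands in
 * for whatever long-running hypercall is being issued):
 *
 *	xen_preemptible_hcall_begin();
 *	ret = HYPERVISOR_dm_op(domid, nr_bufs, bufs);
 *	xen_preemptible_hcall_end();
 */
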
static void __xen_pv_evtchn_do_upcall(void)
{
	irq_enter_rcu();
	inc_irq_stat(irq_hv_callback_count);

	xen_hvm_evtchn_do_upcall();

	irq_exit_rcu();
}

__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	bool inhcall, rcu_exit;

	rcu_exit = idtentry_enter_cond_rcu(regs);
	old_regs = set_irq_regs(regs);

	instrumentation_begin();
	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
	instrumentation_end();

	set_irq_regs(old_regs);

	inhcall = get_and_clear_inhcall();
	if (inhcall && !WARN_ON_ONCE(rcu_exit)) {
		instrumentation_begin();
		idtentry_exit_cond_resched(regs, true);
		instrumentation_end();
		restore_inhcall(inhcall);
	} else {
		idtentry_exit_cond_rcu(regs, rcu_exit);
	}
}
#endif /* CONFIG_XEN_PV */