1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (C) 1991,1992 Linus Torvalds
5 * entry_32.S contains the system-call and low-level fault and trap handling routines.
7 * Stack layout while running C code:
8 * ptrace needs to have all registers on the stack.
9 * If the order here is changed, it needs to be
10 * updated in fork.c:copy_process(), signal.c:do_signal(),
11 * ptrace.c and ptrace.h
23 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
32 #include <linux/linkage.h>
33 #include <linux/err.h>
34 #include <asm/thread_info.h>
35 #include <asm/irqflags.h>
36 #include <asm/errno.h>
37 #include <asm/segment.h>
39 #include <asm/percpu.h>
40 #include <asm/processor-flags.h>
41 #include <asm/irq_vectors.h>
42 #include <asm/cpufeatures.h>
43 #include <asm/alternative-asm.h>
46 #include <asm/frame.h>
47 #include <asm/nospec-branch.h>
51 .section .entry.text, "ax"
54 * We use macros for low-level operations which need to be overridden
55 * for paravirtualization. The following will never clobber any registers:
56 * INTERRUPT_RETURN (aka. "iret")
57 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
58 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
60 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
61 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
62 * Allowing a register to be clobbered can shrink the paravirt replacement
63 * enough to patch inline, increasing performance.
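 *
 * A sketch of what this means in practice (illustrative, not the exact
 * patch-site encoding): on native hardware DISABLE_INTERRUPTS(CLBR_ANY)
 * may be patched down to a bare "cli", while a paravirt guest replaces
 * it with a call into its irq ops; the CLBR_* annotation tells the
 * patcher which registers that replacement may clobber without saving.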
66 #ifdef CONFIG_PREEMPTION
67 # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
69 # define preempt_stop(clobbers)
72 .macro TRACE_IRQS_IRET
73 #ifdef CONFIG_TRACE_IRQFLAGS
74 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
81 #define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
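/*
 * Worked example (assuming the 32-bit PTI layout, where the user page
 * tables sit one page after the kernel ones): with a kernel CR3 of,
 * say, 0x01234000, "orl $PTI_SWITCH_MASK" yields the user CR3
 * 0x01235000, and "andl $(~PTI_SWITCH_MASK)" maps it back.
 */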
84 * User gs save/restore
* %gs is used for userland TLS, and the kernel only uses it for the
* stack canary, which gcc requires to be at %gs:20. Read the comment
* at the top of stackprotector.h for more info.
90 * Local labels 98 and 99 are used.
92 #ifdef CONFIG_X86_32_LAZY_GS
/* unfortunately push/pop can't be no-ops */
99 addl $(4 + \pop), %esp
/* all the rest are no-ops */
111 .macro REG_TO_PTGS reg
113 .macro SET_KERNEL_GS reg
116 #else /* CONFIG_X86_32_LAZY_GS */
129 .pushsection .fixup, "ax"
133 _ASM_EXTABLE(98b, 99b)
137 98: mov PT_GS(%esp), %gs
140 .pushsection .fixup, "ax"
141 99: movl $0, PT_GS(%esp)
144 _ASM_EXTABLE(98b, 99b)
150 .macro REG_TO_PTGS reg
151 movl \reg, PT_GS(%esp)
153 .macro SET_KERNEL_GS reg
154 movl $(__KERNEL_STACK_CANARY), \reg
158 #endif /* CONFIG_X86_32_LAZY_GS */
160 /* Unconditionally switch to user cr3 */
161 .macro SWITCH_TO_USER_CR3 scratch_reg:req
162 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
164 movl %cr3, \scratch_reg
165 orl $PTI_SWITCH_MASK, \scratch_reg
166 movl \scratch_reg, %cr3
170 .macro BUG_IF_WRONG_CR3 no_user_check=0
171 #ifdef CONFIG_DEBUG_ENTRY
172 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
173 .if \no_user_check == 0
174 /* coming from usermode? */
175 testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
180 testl $PTI_SWITCH_MASK, %eax
182 /* From userspace with kernel cr3 - BUG */
189 * Switch to kernel cr3 if not already loaded and return current cr3 in
192 .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
193 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
194 movl %cr3, \scratch_reg
195 /* Test if we are already on kernel CR3 */
196 testl $PTI_SWITCH_MASK, \scratch_reg
198 andl $(~PTI_SWITCH_MASK), \scratch_reg
199 movl \scratch_reg, %cr3
200 /* Return original CR3 in \scratch_reg */
201 orl $PTI_SWITCH_MASK, \scratch_reg
205 #define CS_FROM_ENTRY_STACK (1 << 31)
206 #define CS_FROM_USER_CR3 (1 << 30)
207 #define CS_FROM_KERNEL (1 << 29)
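/*
 * Example with illustrative values: after a kernel-mode exception the
 * saved CS dword might read 0x20000060 (CS_FROM_KERNEL | 0x60); the
 * "andl $0x0000ffff" below recovers the real 16-bit selector.
 */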
211 * The high bits of the CS dword (__csh) are used for CS_FROM_*.
212 * Clear them in case hardware didn't do this for us.
214 andl $0x0000ffff, 4*4(%esp)
217 testl $X86_EFLAGS_VM, 5*4(%esp)
218 jnz .Lfrom_usermode_no_fixup_\@
220 testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
221 jnz .Lfrom_usermode_no_fixup_\@
223 orl $CS_FROM_KERNEL, 4*4(%esp)
* When we're here from kernel mode, the (exception) stack looks like:
228 * 6*4(%esp) - <previous context>
232 * 2*4(%esp) - orig_eax
233 * 1*4(%esp) - gs / function
* Let's build a 5-entry IRET frame after that, such that struct pt_regs
* is complete and, in particular, regs->sp is correct. This leaves the
* original 6 entries as a gap:
240 * 14*4(%esp) - <previous context>
241 * 13*4(%esp) - gap / flags
242 * 12*4(%esp) - gap / cs
243 * 11*4(%esp) - gap / ip
244 * 10*4(%esp) - gap / orig_eax
245 * 9*4(%esp) - gap / gs / function
246 * 8*4(%esp) - gap / fs
252 * 2*4(%esp) - orig_eax
253 * 1*4(%esp) - gs / function
258 pushl %esp # sp (points at ss)
259 addl $7*4, (%esp) # point sp back at the previous context
260 pushl 7*4(%esp) # flags
263 pushl 7*4(%esp) # orig_eax
264 pushl 7*4(%esp) # gs / function
266 .Lfrom_usermode_no_fixup_\@:
271 * We're called with %ds, %es, %fs, and %gs from the interrupted
272 * frame, so we shouldn't use them. Also, we may be in ESPFIX
273 * mode and therefore have a nonzero SS base and an offset ESP,
* so any attempt to access the stack needs to use SS (except for
* accesses through %esp, which automatically use SS).
277 testl $CS_FROM_KERNEL, 1*4(%esp)
278 jz .Lfinished_frame_\@
281 * Reconstruct the 3 entry IRET frame right after the (modified)
282 * regs->sp without lowering %esp in between, such that an NMI in the
283 * middle doesn't scribble our stack.
287 movl 5*4(%esp), %eax # (modified) regs->sp
289 movl 4*4(%esp), %ecx # flags
290 movl %ecx, %ss:-1*4(%eax)
292 movl 3*4(%esp), %ecx # cs
293 andl $0x0000ffff, %ecx
294 movl %ecx, %ss:-2*4(%eax)
296 movl 2*4(%esp), %ecx # ip
297 movl %ecx, %ss:-3*4(%eax)
299 movl 1*4(%esp), %ecx # eax
300 movl %ecx, %ss:-4*4(%eax)
308 .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
316 movl $(__KERNEL_PERCPU), %eax
318 .if \unwind_espfix > 0
333 movl $(__USER_DS), %edx
339 /* Switch to kernel stack if necessary */
340 .if \switch_stacks > 0
341 SWITCH_TO_KERNEL_STACK
345 .macro SAVE_ALL_NMI cr3_reg:req
351 * Now switch the CR3 when PTI is enabled.
* We can enter with either user or kernel cr3; the code will
* store the old cr3 in \cr3_reg and switch to the kernel cr3
357 SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
362 .macro RESTORE_INT_REGS
372 .macro RESTORE_REGS pop=0
379 .pushsection .fixup, "ax"
393 .macro RESTORE_ALL_NMI cr3_reg:req pop=0
395 * Now switch the CR3 when PTI is enabled.
* We enter with kernel cr3 and switch the cr3 to the value
* stored in \cr3_reg, which is either a user or a kernel cr3.
400 ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
402 testl $PTI_SWITCH_MASK, \cr3_reg
405 /* User cr3 in \cr3_reg - write it to hardware cr3 */
412 RESTORE_REGS pop=\pop
415 .macro CHECK_AND_APPLY_ESPFIX
416 #ifdef CONFIG_X86_ESPFIX32
417 #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
419 ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
421 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
* Warning: PT_OLDSS(%esp) contains wrong/random values if we
* are returning to the kernel.
425 * See comments in process.c:copy_thread() for details.
427 movb PT_OLDSS(%esp), %ah
428 movb PT_CS(%esp), %al
429 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
430 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
431 jne .Lend_\@ # returning to user-space with LDT SS
* Set up and switch to the ESPFIX stack
* We're returning to userspace with a 16-bit stack. The CPU will not
* restore the high word of ESP for us on executing iret... This is an
* "official" bug of all the x86-compatible CPUs, which we can work
* around to make dosemu and wine happy. We do this by preloading the
* high word of ESP with the high word of the userspace ESP while
* compensating for the offset by changing to the ESPFIX segment,
* whose base address accounts for the difference.
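*
* Worked example with made-up numbers: for a userspace ESP of
* 0xabcd1234 and a kernel ESP of 0x00567890, %eax below becomes
* 0xabcd7890 (user high word, kernel low word) and %edx becomes
* 0x54890000, which is installed as the ESPFIX segment base. The CPU
* then resolves 0x54890000 + 0xabcd7890 (mod 2^32) back to the kernel
* stack at 0x00567890, while the visible high word of ESP matches
* userspace's.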
444 mov %esp, %edx /* load kernel esp */
445 mov PT_OLDESP(%esp), %eax /* load userspace esp */
446 mov %dx, %ax /* eax: new kernel esp */
447 sub %eax, %edx /* offset (low word is 0) */
449 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
450 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
452 pushl %eax /* new kernel esp */
454 * Disable interrupts, but do not irqtrace this section: we
455 * will soon execute iret and the tracer was already set to
456 * the irqstate after the IRET:
458 DISABLE_INTERRUPTS(CLBR_ANY)
459 lss (%esp), %esp /* switch to espfix segment */
461 #endif /* CONFIG_X86_ESPFIX32 */
465 * Called with pt_regs fully populated and kernel segments loaded,
466 * so we can access PER_CPU and use the integer registers.
* We need to be very careful here with the %esp switch, because an NMI
* can happen at any time. If the NMI handler finds itself on the
470 * entry-stack, it will overwrite the task-stack and everything we
471 * copied there. So allocate the stack-frame on the task-stack and
472 * switch to it before we do any copying.
475 .macro SWITCH_TO_KERNEL_STACK
477 ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
481 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
484 * %eax now contains the entry cr3 and we carry it forward in
485 * that register for the time this macro runs
488 /* Are we on the entry stack? Bail out if not! */
489 movl PER_CPU_VAR(cpu_entry_area), %ecx
490 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
491 subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
492 cmpl $SIZEOF_entry_stack, %ecx
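/*
 * Unsigned-compare trick: if %esp lies inside the entry stack, the
 * subtraction above yields a small value below SIZEOF_entry_stack;
 * any %esp outside it wraps around to a large unsigned value, so a
 * single unsigned branch rejects both the below and above cases.
 */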
495 /* Load stack pointer into %esi and %edi */
499 /* Move %edi to the top of the entry stack */
500 andl $(MASK_entry_stack), %edi
501 addl $(SIZEOF_entry_stack), %edi
503 /* Load top of task-stack into %edi */
504 movl TSS_entry2task_stack(%edi), %edi
506 /* Special case - entry from kernel mode via entry stack */
508 movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS
509 movb PT_CS(%esp), %cl
510 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
512 movl PT_CS(%esp), %ecx
513 andl $SEGMENT_RPL_MASK, %ecx
516 jb .Lentry_from_kernel_\@
519 movl $PTREGS_SIZE, %ecx
522 testl $X86_EFLAGS_VM, PT_EFLAGS(%esi)
* The stack-frame contains 4 additional segment registers when
* coming from VM86 mode
534 /* Allocate frame on task-stack */
537 /* Switch to task-stack */
541 * We are now on the task-stack and can safely copy over the
550 .Lentry_from_kernel_\@:
553 * This handles the case when we enter the kernel from
554 * kernel-mode and %esp points to the entry-stack. When this
555 * happens we need to switch to the task-stack to run C code,
556 * but switch back to the entry-stack again when we approach
557 * iret and return to the interrupted code-path. This usually
558 * happens when we hit an exception while restoring user-space
559 * segment registers on the way back to user-space or when the
560 * sysenter handler runs with eflags.tf set.
562 * When we switch to the task-stack here, we can't trust the
563 * contents of the entry-stack anymore, as the exception handler
564 * might be scheduled out or moved to another CPU. Therefore we
565 * copy the complete entry-stack to the task-stack and set a
566 * marker in the iret-frame (bit 31 of the CS dword) to detect
567 * what we've done on the iret path.
569 * On the iret path we copy everything back and switch to the
570 * entry-stack, so that the interrupted kernel code-path
571 * continues on the same stack it was interrupted with.
573 * Be aware that an NMI can happen anytime in this code.
575 * %esi: Entry-Stack pointer (same as %esp)
576 * %edi: Top of the task stack
577 * %eax: CR3 on kernel entry
580 /* Calculate number of bytes on the entry stack in %ecx */
583 /* %ecx to the top of entry-stack */
584 andl $(MASK_entry_stack), %ecx
585 addl $(SIZEOF_entry_stack), %ecx
587 /* Number of bytes on the entry stack to %ecx */
590 /* Mark stackframe as coming from entry stack */
591 orl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
594 * Test the cr3 used to enter the kernel and add a marker
595 * so that we can switch back to it before iret.
597 testl $PTI_SWITCH_MASK, %eax
599 orl $CS_FROM_USER_CR3, PT_CS(%esp)
602 * %esi and %edi are unchanged, %ecx contains the number of
603 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
* the stack-frame on the task-stack and copy everything over
606 jmp .Lcopy_pt_regs_\@
612 * Switch back from the kernel stack to the entry stack.
* The %esp register must point to pt_regs on the task stack. The macro
* first calculates the size of the stack-frame to copy, depending on
* whether we return to VM86 mode or not, and then uses 'rep movsl'
* to copy the contents of the stack over to the entry stack.
619 * We must be very careful here, as we can't trust the contents of the
* task-stack once we have switched to the entry-stack. When an NMI
* happens while on the entry-stack, the NMI handler will switch back
* to the top of the task stack, overwriting the stack-frame we are
* about to copy.
623 * Therefore we switch the stack only after everything is copied over.
625 .macro SWITCH_TO_ENTRY_STACK
627 ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
630 movl $PTREGS_SIZE, %ecx
633 testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
636 /* Additional 4 registers to copy when returning to VM86 mode */
642 /* Initialize source and destination for movsl */
643 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
647 /* Save future stack pointer in %ebx */
650 /* Copy over the stack-frame */
656 * Switch to entry-stack - needs to happen after everything is
657 * copied because the NMI handler will overwrite the task-stack
658 * when on entry-stack
666 * This macro handles the case when we return to kernel-mode on the iret
667 * path and have to switch back to the entry stack and/or user-cr3
669 * See the comments below the .Lentry_from_kernel_\@ label in the
670 * SWITCH_TO_KERNEL_STACK macro for more details.
672 .macro PARANOID_EXIT_TO_KERNEL_MODE
* Test if we entered the kernel via the entry-stack. Most
* likely we did not, because this code only runs on the
677 * return-to-kernel path.
679 testl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
682 /* Unlikely slow-path */
684 /* Clear marker from stack-frame */
685 andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
687 /* Copy the remaining task-stack contents to entry-stack */
689 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
691 /* Bytes on the task-stack to ecx */
692 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
695 /* Allocate stack-frame on entry-stack */
* Save the future stack-pointer; we must not switch until the
700 * copy is done, otherwise the NMI handler could destroy the
701 * contents of the task-stack we are about to copy.
710 /* Safe to switch to entry-stack now */
714 * We came from entry-stack and need to check if we also need to
715 * switch back to user cr3.
717 testl $CS_FROM_USER_CR3, PT_CS(%esp)
720 /* Clear marker from stack-frame */
721 andl $(~CS_FROM_USER_CR3), PT_CS(%esp)
723 SWITCH_TO_USER_CR3 scratch_reg=%eax
731 ENTRY(__switch_to_asm)
733 * Save callee-saved registers
734 * This must match the order in struct inactive_task_frame
743 movl %esp, TASK_threadsp(%eax)
744 movl TASK_threadsp(%edx), %esp
746 #ifdef CONFIG_STACKPROTECTOR
747 movl TASK_stack_canary(%edx), %ebx
748 movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
751 #ifdef CONFIG_RETPOLINE
753 * When switching from a shallower to a deeper call stack
754 * the RSB may either underflow or use entries populated
755 * with userspace addresses. On CPUs where those concerns
756 * exist, overwrite the RSB with entries which capture
757 * speculative execution to prevent attack.
759 FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
762 /* restore callee-saved registers */
773 * The unwinder expects the last frame on the stack to always be at the same
774 * offset from the end of the page, which allows it to validate the stack.
* Calling schedule_tail() directly would break that convention because it's
* an asmlinkage function, so its argument has to be pushed on the stack. This
777 * wrapper creates a proper "end of stack" frame header before the call.
779 ENTRY(schedule_tail_wrapper)
788 ENDPROC(schedule_tail_wrapper)
790 * A newly forked process directly context switches into this address.
792 * eax: prev task we switched from
793 * ebx: kernel thread func (NULL for user thread)
794 * edi: kernel thread arg
797 call schedule_tail_wrapper
800 jnz 1f /* kernel threads are uncommon */
803 /* When we fork, we trace the syscall return in the child, too. */
805 call syscall_return_slowpath
813 * A kernel thread is allowed to return here after successfully
* calling do_execve(). Exit to userspace to complete the execve()
* syscall.
817 movl $0, PT_EAX(%esp)
822 * Return to user mode is not as complex as all this looks,
823 * but we want the default path for a system call return to
* go as quickly as possible, which is why some of this is
* less clear than it otherwise should be.
828 # userspace resumption stub bypassing syscall exit tracing
831 preempt_stop(CLBR_ANY)
834 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
835 movb PT_CS(%esp), %al
836 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
* We can be coming here from a child spawned by kernel_thread().
841 movl PT_CS(%esp), %eax
842 andl $SEGMENT_RPL_MASK, %eax
845 jb restore_all_kernel # not returning to v8086 or userspace
847 ENTRY(resume_userspace)
848 DISABLE_INTERRUPTS(CLBR_ANY)
851 call prepare_exit_to_usermode
853 END(ret_from_exception)
855 GLOBAL(__begin_SYSENTER_singlestep_region)
857 * All code from here through __end_SYSENTER_singlestep_region is subject
858 * to being single-stepped if a user program sets TF and executes SYSENTER.
859 * There is absolutely nothing that we can do to prevent this from happening
860 * (thanks Intel!). To keep our handling of this situation as simple as
861 * possible, we handle TF just like AC and NT, except that our #DB handler
862 * will ignore all of the single-step traps generated in this range.
867 * Xen doesn't set %esp to be precisely what the normal SYSENTER
868 * entry point expects, so fix it up before using the normal path.
870 ENTRY(xen_sysenter_target)
871 addl $5*4, %esp /* remove xen-provided frame */
872 jmp .Lsysenter_past_esp
876 * 32-bit SYSENTER entry.
878 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
879 * if X86_FEATURE_SEP is available. This is the preferred system call
880 * entry on 32-bit systems.
882 * The SYSENTER instruction, in principle, should *only* occur in the
883 * vDSO. In practice, a small number of Android devices were shipped
884 * with a copy of Bionic that inlined a SYSENTER instruction. This
885 * never happened in any of Google's Bionic versions -- it only happened
886 * in a narrow range of Intel-provided versions.
888 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
* IF and VM in EFLAGS are cleared (IOW: interrupts are off).
890 * SYSENTER does not save anything on the stack,
891 * and does not save old EIP (!!!), ESP, or EFLAGS.
893 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
894 * user and/or vm86 state), we explicitly disable the SYSENTER
895 * instruction in vm86 mode by reprogramming the MSRs.
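*
* (The MSRs in question are MSR_IA32_SYSENTER_CS/ESP/EIP; clearing
* MSR_IA32_SYSENTER_CS is what makes a stray SYSENTER fault -- see
* the vm86 code for the exact mechanism.)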
898 * eax system call number
907 ENTRY(entry_SYSENTER_32)
* On entry-stack with all userspace-regs live - save and
* restore eflags and %eax to use it as scratch-reg for the cr3
* switch.
915 BUG_IF_WRONG_CR3 no_user_check=1
916 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
920 /* Stack empty again, switch to task stack */
921 movl TSS_entry2task_stack(%esp), %esp
924 pushl $__USER_DS /* pt_regs->ss */
925 pushl %ebp /* pt_regs->sp (stashed in bp) */
926 pushfl /* pt_regs->flags (except IF = 0) */
927 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
928 pushl $__USER_CS /* pt_regs->cs */
929 pushl $0 /* pt_regs->ip = 0 (placeholder) */
930 pushl %eax /* pt_regs->orig_ax */
931 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */
934 * SYSENTER doesn't filter flags, so we need to clear NT, AC
* and TF ourselves. To save a few cycles, we can check whether
* any of them was set instead of doing an unconditional popfl.
937 * This needs to happen before enabling interrupts so that
938 * we don't get preempted with NT set.
940 * If TF is set, we will single-step all the way to here -- do_debug
941 * will ignore all the traps. (Yes, this is slow, but so is
* single-stepping in general. This allows us to avoid having
* more complicated code to handle the case where a user program
944 * forces us to single-step through the SYSENTER entry code.)
946 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
947 * out-of-line as an optimization: NT is unlikely to be set in the
948 * majority of the cases and instead of polluting the I$ unnecessarily,
949 * we're keeping that code behind a branch which will predict as
950 * not-taken and therefore its instructions won't be fetched.
952 testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
953 jnz .Lsysenter_fix_flags
954 .Lsysenter_flags_fixed:
* User mode is traced as though IRQs are on, and SYSENTER
* turned them off.
963 call do_fast_syscall_32
964 /* XEN PV guests always use IRET path */
965 ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
966 "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
970 /* Opportunistic SYSEXIT */
971 TRACE_IRQS_ON /* User mode traces as IRQs on. */
* Set up the entry stack - we keep the pointer in %eax and do the
* switch after almost all user-state is restored.
978 /* Load entry stack pointer and allocate frame for eflags/eax */
979 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
982 /* Copy eflags and eax to entry stack */
983 movl PT_EFLAGS(%esp), %edi
984 movl PT_EAX(%esp), %esi
988 /* Restore user registers and segments */
989 movl PT_EIP(%esp), %edx /* pt_regs->ip */
990 movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
991 1: mov PT_FS(%esp), %fs
994 popl %ebx /* pt_regs->bx */
995 addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
996 popl %esi /* pt_regs->si */
997 popl %edi /* pt_regs->di */
998 popl %ebp /* pt_regs->bp */
1000 /* Switch to entry stack */
1003 /* Now ready to switch the cr3 */
1004 SWITCH_TO_USER_CR3 scratch_reg=%eax
1007 * Restore all flags except IF. (We restore IF separately because
1008 * STI gives a one-instruction window in which we won't be interrupted,
1009 * whereas POPF does not.)
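*
* The "btrl" below clears IF in the saved flags copy, so restoring
* the flags cannot itself re-enable interrupts; that is left to the
* sti/sysexit pair at the end of this path.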
1011 btrl $X86_EFLAGS_IF_BIT, (%esp)
1012 BUG_IF_WRONG_CR3 no_user_check=1
* Return to the vDSO, which will pop ecx and edx.
1018 * Don't bother with DS and ES (they already contain __USER_DS).
1023 .pushsection .fixup, "ax"
1024 2: movl $0, PT_FS(%esp)
1027 _ASM_EXTABLE(1b, 2b)
1030 .Lsysenter_fix_flags:
1031 pushl $X86_EFLAGS_FIXED
1033 jmp .Lsysenter_flags_fixed
1034 GLOBAL(__end_SYSENTER_singlestep_region)
1035 ENDPROC(entry_SYSENTER_32)
1038 * 32-bit legacy system call entry.
1040 * 32-bit x86 Linux system calls traditionally used the INT $0x80
1041 * instruction. INT $0x80 lands here.
* This entry point can be used by any 32-bit program to perform system calls.
1044 * Instances of INT $0x80 can be found inline in various programs and
1045 * libraries. It is also used by the vDSO's __kernel_vsyscall
1046 * fallback for hardware that doesn't support a faster entry method.
1047 * Restarted 32-bit system calls also fall back to INT $0x80
1048 * regardless of what instruction was originally used to do the system
1049 * call. (64-bit programs can use INT $0x80 as well, but they can
1050 * only run on 64-bit kernels and therefore land in
1051 * entry_INT80_compat.)
1053 * This is considered a slow path. It is not used by most libc
1054 * implementations on modern hardware except during process startup.
1057 * eax system call number
1065 ENTRY(entry_INT80_32)
1067 pushl %eax /* pt_regs->orig_ax */
1069 SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */
* User mode is traced as though IRQs are on, and the interrupt gate
* turned them off.
1078 call do_int80_syscall_32
1085 SWITCH_TO_ENTRY_STACK
1086 .Lrestore_all_notrace:
1087 CHECK_AND_APPLY_ESPFIX
1089 /* Switch back to user CR3 */
1090 SWITCH_TO_USER_CR3 scratch_reg=%eax
1094 /* Restore user state */
1095 RESTORE_REGS pop=4 # skip orig_eax/error_code
* ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
* when returning from an IPI handler and when returning from the
* scheduler to user-space.
1105 #ifdef CONFIG_PREEMPTION
1106 DISABLE_INTERRUPTS(CLBR_ANY)
1107 cmpl $0, PER_CPU_VAR(__preempt_count)
1109 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
1111 call preempt_schedule_irq
1115 PARANOID_EXIT_TO_KERNEL_MODE
1120 .section .fixup, "ax"
1122 pushl $0 # no error code
1123 pushl $do_iret_error
1125 #ifdef CONFIG_DEBUG_ENTRY
* The stack-frame here is the one that iret faulted on, so it's a
* return-to-user frame. We are on kernel-cr3 because we come here from
* the fixup code. This confuses the CR3 checker, so switch to user-cr3
* as the checker expects.
1133 SWITCH_TO_USER_CR3 scratch_reg=%eax
1137 jmp common_exception
1139 _ASM_EXTABLE(.Lirq_return, iret_exc)
1140 ENDPROC(entry_INT80_32)
1142 .macro FIXUP_ESPFIX_STACK
* Switch back from the ESPFIX stack to the normal zero-based stack.
*
* We can't call C functions using the ESPFIX stack. This code reads
* the high word of the segment base from the GDT, switches to the
* normal stack, and adjusts ESP by the matching offset.
1150 #ifdef CONFIG_X86_ESPFIX32
1151 /* fixup the stack */
1152 mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
1153 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
1155 addl %esp, %eax /* the adjusted stack pointer */
1158 lss (%esp), %esp /* switch to the normal stack segment */
1162 .macro UNWIND_ESPFIX_STACK
1163 /* It's safe to clobber %eax, all other regs need to be preserved */
1164 #ifdef CONFIG_X86_ESPFIX32
1166 /* see if on espfix stack */
1167 cmpw $__ESPFIX_SS, %ax
1169 /* switch to normal stack */
1176 * Build the entry stubs with some assembler magic.
1177 * We pack 1 stub into every 8-byte block.
1180 ENTRY(irq_entries_start)
1181 vector=FIRST_EXTERNAL_VECTOR
1182 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
1183 pushl $(~vector+0x80) /* Note: always in signed byte range */
1185 jmp common_interrupt
1188 END(irq_entries_start)
1190 #ifdef CONFIG_X86_LOCAL_APIC
1192 ENTRY(spurious_entries_start)
1193 vector=FIRST_SYSTEM_VECTOR
1194 .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
1195 pushl $(~vector+0x80) /* Note: always in signed byte range */
1200 END(spurious_entries_start)
1204 addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
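/*
 * Worked example: each stub pushes ~vector + 0x80, which always fits
 * in a signed byte and keeps the stubs small. The "addl $-0x80" above
 * leaves ~vector in orig_eax; e.g. vector 0x31 is pushed as 0x4e and
 * becomes 0xffffffce == ~0x31 here, which the C handler inverts to
 * recover the vector number.
 */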
1205 SAVE_ALL switch_stacks=1
1206 ENCODE_FRAME_POINTER
1209 call smp_spurious_interrupt
1211 ENDPROC(common_spurious)
1215 * the CPU automatically disables interrupts when executing an IRQ vector,
1216 * so IRQ-flags tracing has to follow that:
1218 .p2align CONFIG_X86_L1_CACHE_SHIFT
1221 addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
1223 SAVE_ALL switch_stacks=1
1224 ENCODE_FRAME_POINTER
1229 ENDPROC(common_interrupt)
1231 #define BUILD_INTERRUPT3(name, nr, fn) \
1235 SAVE_ALL switch_stacks=1; \
1236 ENCODE_FRAME_POINTER; \
1240 jmp ret_from_intr; \
1243 #define BUILD_INTERRUPT(name, nr) \
1244 BUILD_INTERRUPT3(name, nr, smp_##name); \
1246 /* The include is where all of the SMP etc. interrupts come from */
1247 #include <asm/entry_arch.h>
1249 ENTRY(coprocessor_error)
1252 pushl $do_coprocessor_error
1253 jmp common_exception
1254 END(coprocessor_error)
1256 ENTRY(simd_coprocessor_error)
1259 #ifdef CONFIG_X86_INVD_BUG
1260 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
1261 ALTERNATIVE "pushl $do_general_protection", \
1262 "pushl $do_simd_coprocessor_error", \
1265 pushl $do_simd_coprocessor_error
1267 jmp common_exception
1268 END(simd_coprocessor_error)
1270 ENTRY(device_not_available)
1272 pushl $-1 # mark this as an int
1273 pushl $do_device_not_available
1274 jmp common_exception
1275 END(device_not_available)
1277 #ifdef CONFIG_PARAVIRT
1280 _ASM_EXTABLE(native_iret, iret_exc)
1288 jmp common_exception
1295 jmp common_exception
1301 pushl $do_invalid_op
1302 jmp common_exception
1305 ENTRY(coprocessor_segment_overrun)
1308 pushl $do_coprocessor_segment_overrun
1309 jmp common_exception
1310 END(coprocessor_segment_overrun)
1314 pushl $do_invalid_TSS
1315 jmp common_exception
1318 ENTRY(segment_not_present)
1320 pushl $do_segment_not_present
1321 jmp common_exception
1322 END(segment_not_present)
1324 ENTRY(stack_segment)
1326 pushl $do_stack_segment
1327 jmp common_exception
1330 ENTRY(alignment_check)
1332 pushl $do_alignment_check
1333 jmp common_exception
1334 END(alignment_check)
1338 pushl $0 # no error code
1339 pushl $do_divide_error
1340 jmp common_exception
1343 #ifdef CONFIG_X86_MCE
1344 ENTRY(machine_check)
1347 pushl machine_check_vector
1348 jmp common_exception
1352 ENTRY(spurious_interrupt_bug)
1355 pushl $do_spurious_interrupt_bug
1356 jmp common_exception
1357 END(spurious_interrupt_bug)
1359 #ifdef CONFIG_XEN_PV
1360 ENTRY(xen_hypervisor_callback)
1362 * Check to see if we got the event in the critical
1363 * region in xen_iret_direct, after we've reenabled
* events and checked for pending events. This simulates the
* iret instruction's behaviour where it delivers a
* pending interrupt when enabling interrupts:
1368 cmpl $xen_iret_start_crit, (%esp)
1370 cmpl $xen_iret_end_crit, (%esp)
1372 call xen_iret_crit_fixup
1374 pushl $-1 /* orig_ax = -1 => not a system call */
1376 ENCODE_FRAME_POINTER
1379 call xen_evtchn_do_upcall
1380 #ifndef CONFIG_PREEMPTION
1381 call xen_maybe_preempt_hcall
1384 ENDPROC(xen_hypervisor_callback)
* The hypervisor uses this for application faults while it executes.
1388 * We get here for two reasons:
1389 * 1. Fault while reloading DS, ES, FS or GS
1390 * 2. Fault while executing IRET
1391 * Category 1 we fix up by reattempting the load, and zeroing the segment
1392 * register if the load fails.
1393 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
1394 * normal Linux return path in this case because if we use the IRET hypercall
1395 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1396 * We distinguish between categories by maintaining a status value in EAX.
1398 ENTRY(xen_failsafe_callback)
1403 3: mov 12(%esp), %fs
1404 4: mov 16(%esp), %gs
1405 /* EAX == 0 => Category 1 (Bad segment)
1406 EAX != 0 => Category 2 (Bad IRET) */
1412 5: pushl $-1 /* orig_ax = -1 => not a system call */
1414 ENCODE_FRAME_POINTER
1415 jmp ret_from_exception
1417 .section .fixup, "ax"
1431 _ASM_EXTABLE(1b, 6b)
1432 _ASM_EXTABLE(2b, 7b)
1433 _ASM_EXTABLE(3b, 8b)
1434 _ASM_EXTABLE(4b, 9b)
1435 ENDPROC(xen_failsafe_callback)
1436 #endif /* CONFIG_XEN_PV */
1438 #ifdef CONFIG_XEN_PVHVM
1439 BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1440 xen_evtchn_do_upcall)
1444 #if IS_ENABLED(CONFIG_HYPERV)
1446 BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1447 hyperv_vector_handler)
1449 BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
1450 hyperv_reenlightenment_intr)
1452 BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
1453 hv_stimer0_vector_handler)
1455 #endif /* CONFIG_HYPERV */
1459 pushl $do_page_fault
1460 jmp common_exception_read_cr2
1463 common_exception_read_cr2:
1464 /* the function address is in %gs's slot on the stack */
1465 SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
1467 ENCODE_FRAME_POINTER
1471 movl PT_GS(%esp), %edi
1475 GET_CR2_INTO(%ecx) # might clobber %eax
1477 /* fixup orig %eax */
1478 movl PT_ORIG_EAX(%esp), %edx # get the error code
1479 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1482 movl %esp, %eax # pt_regs pointer
1484 jmp ret_from_exception
1485 END(common_exception_read_cr2)
1488 /* the function address is in %gs's slot on the stack */
1489 SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
1490 ENCODE_FRAME_POINTER
1494 movl PT_GS(%esp), %edi # get the function address
1498 /* fixup orig %eax */
1499 movl PT_ORIG_EAX(%esp), %edx # get the error code
1500 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1503 movl %esp, %eax # pt_regs pointer
1505 jmp ret_from_exception
1506 END(common_exception)
1510 * Entry from sysenter is now handled in common_exception
1513 pushl $-1 # mark this as an int
1515 jmp common_exception
1519 * NMI is doubly nasty. It can happen on the first instruction of
1520 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
1521 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
1522 * switched stacks. We handle both conditions by simply checking whether we
1523 * interrupted kernel code running on the SYSENTER stack.
1528 #ifdef CONFIG_X86_ESPFIX32
1531 cmpw $__ESPFIX_SS, %ax
1533 je .Lnmi_espfix_stack
1536 pushl %eax # pt_regs->orig_ax
1537 SAVE_ALL_NMI cr3_reg=%edi
1538 ENCODE_FRAME_POINTER
1539 xorl %edx, %edx # zero error code
1540 movl %esp, %eax # pt_regs pointer
1542 /* Are we currently on the SYSENTER stack? */
1543 movl PER_CPU_VAR(cpu_entry_area), %ecx
1544 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
1545 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
1546 cmpl $SIZEOF_entry_stack, %ecx
1547 jb .Lnmi_from_sysenter_stack
1549 /* Not on SYSENTER stack. */
1553 .Lnmi_from_sysenter_stack:
1555 * We're on the SYSENTER stack. Switch off. No one (not even debug)
1556 * is using the thread stack right now, so it's safe for us to use it.
1559 movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
1564 CHECK_AND_APPLY_ESPFIX
1565 RESTORE_ALL_NMI cr3_reg=%edi pop=4
1568 #ifdef CONFIG_X86_ESPFIX32
* create the pointer used by 'lss' to switch back to the espfix stack
/* copy the 12-byte iret frame */
1581 SAVE_ALL_NMI cr3_reg=%edi
1582 ENCODE_FRAME_POINTER
1583 FIXUP_ESPFIX_STACK # %eax == %esp
1584 xorl %edx, %edx # zero error code
1586 RESTORE_ALL_NMI cr3_reg=%edi
1587 lss 12+4(%esp), %esp # back to espfix stack
1594 pushl $-1 # mark this as an int
1596 SAVE_ALL switch_stacks=1
1597 ENCODE_FRAME_POINTER
1599 xorl %edx, %edx # zero error code
1600 movl %esp, %eax # pt_regs pointer
1602 jmp ret_from_exception
1605 ENTRY(general_protection)
1606 pushl $do_general_protection
1607 jmp common_exception
1608 END(general_protection)
1610 #ifdef CONFIG_KVM_GUEST
1611 ENTRY(async_page_fault)
1613 pushl $do_async_page_fault
1614 jmp common_exception_read_cr2
1615 END(async_page_fault)
1618 ENTRY(rewind_stack_do_exit)
1619 /* Prevent any naive code from trying to unwind to our caller. */
1622 movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
1623 leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
1627 END(rewind_stack_do_exit)