// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>
#include <asm/processor.h>
#include <asm/pkru.h>
#include <asm/fpu/sched.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/resctrl.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
		 const char *log_lvl)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, es;

	show_iret_regs(regs, log_lvl);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");
	printk("%sRAX: %016lx RBX: %016lx RCX: %016lx\n",
	       log_lvl, regs->ax, regs->bx, regs->cx);
	printk("%sRDX: %016lx RSI: %016lx RDI: %016lx\n",
	       log_lvl, regs->dx, regs->si, regs->di);
	printk("%sRBP: %016lx R08: %016lx R09: %016lx\n",
	       log_lvl, regs->bp, regs->r8, regs->r9);
	printk("%sR10: %016lx R11: %016lx R12: %016lx\n",
	       log_lvl, regs->r10, regs->r11, regs->r12);
	printk("%sR13: %016lx R14: %016lx R15: %016lx\n",
	       log_lvl, regs->r13, regs->r14, regs->r15);
	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk("%sFS:  %016lx GS:  %016lx\n",
		       log_lvl, fs, shadowgs);
		return;
	}
	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();
	printk("%sFS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       log_lvl, fs, fsindex, gs, gsindex, shadowgs);
	printk("%sCS:  %04lx DS: %04x ES: %04x CR0: %016lx\n",
	       log_lvl, regs->cs, ds, es, cr0);
	printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n",
	       log_lvl, cr2, cr3, cr4);
	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	      (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n",
		       log_lvl, d0, d1, d2);
		printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n",
		       log_lvl, d3, d6, d7);
	}

	if (cpu_feature_enabled(X86_FEATURE_OSPKE))
		printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}
void release_thread(struct task_struct *dead_task)
{
	WARN_ON(dead_task->mm);
}
enum which_selector {
	FS,
	GS
};
/*
 * Out of line to be protected from kprobes and tracing. If this were
 * traced or probed, any access to a per-CPU variable would happen with
 * the wrong GS.
 *
 * It is not used on Xen paravirt. When paravirt support is needed, it
 * needs to be renamed with native_ prefix.
 */
static noinstr unsigned long __rdgsbase_inactive(void)
{
	unsigned long gsbase;

	lockdep_assert_irqs_disabled();

	if (!static_cpu_has(X86_FEATURE_XENPV)) {
		native_swapgs();
		gsbase = rdgsbase();
		native_swapgs();
	} else {
		instrumentation_begin();
		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
		instrumentation_end();
	}

	return gsbase;
}
/*
 * Out of line to be protected from kprobes and tracing. If this were
 * traced or probed, any access to a per-CPU variable would happen with
 * the wrong GS.
 *
 * It is not used on Xen paravirt. When paravirt support is needed, it
 * needs to be renamed with native_ prefix.
 */
static noinstr void __wrgsbase_inactive(unsigned long gsbase)
{
	lockdep_assert_irqs_disabled();

	if (!static_cpu_has(X86_FEATURE_XENPV)) {
		native_swapgs();
		wrgsbase(gsbase);
		native_swapgs();
	} else {
		instrumentation_begin();
		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
		instrumentation_end();
	}
}
/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero. On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct. This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}
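
/*
 * Decision summary for save_base_legacy(), as a worked example (illustration
 * only): an outgoing task with selector 0 keeps whatever base was previously
 * saved (the hot path, no RDMSR); selectors 1-3 (index 0 with a nonzero
 * TI/RPL encoding) force the saved base to 0; selectors above 3 name a real
 * descriptor, so the base can always be recovered from the GDT or LDT and
 * nothing needs to be saved in thread_struct.
 */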
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
		/*
		 * If FSGSBASE is enabled, we can't make any useful guesses
		 * about the base, and user code expects us to save the current
		 * value. Fortunately, reading the base directly is efficient.
		 */
		task->thread.fsbase = rdfsbase();
		task->thread.gsbase = __rdgsbase_inactive();
	} else {
		save_base_legacy(task, task->thread.fsindex, FS);
		save_base_legacy(task, task->thread.gsindex, GS);
	}
}
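
/*
 * Illustration only: with FSGSBASE enabled and exposed to user space (the
 * "fsgsbase" flag in /proc/cpuinfo), a task can change its own FS base
 * without a syscall, which is why the kernel must save the live value above
 * rather than guess. A minimal hedged userspace sketch, assuming GCC or
 * Clang with -mfsgsbase:
 *
 *	#include <immintrin.h>
 *
 *	static unsigned long read_my_fsbase(void)
 *	{
 *		return _readfsbase_u64();	// RDFSBASE instruction
 *	}
 */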
/*
 * While a process is running, current->thread.fsbase and current->thread.gsbase
 * may not match the corresponding CPU registers (see save_base_legacy()).
 */
void current_save_fsgs(void)
{
	unsigned long flags;

	/* Interrupts need to be off for FSGSBASE */
	local_irq_save(flags);
	save_fsgs(current);
	local_irq_restore(flags);
}
#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL_GPL(current_save_fsgs);
#endif
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}
static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives. This is the case on
				 * Intel hardware.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment. Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}
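
/*
 * Worked example of the fast path above (illustration only): switching
 * between two ordinary 64-bit tasks on a CPU without X86_BUG_NULL_SEG,
 * both FS states are typically fully zeroed (prev_index == 0,
 * prev_base == 0, next_index == 0), so (prev_index | next_index |
 * prev_base) == 0 and the load is skipped entirely: neither a segment
 * register write nor a WRMSR is issued.
 */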
/*
 * Store prev's PKRU value and load next's PKRU value if they differ. PKRU
 * is not XSTATE managed on context switch because that would require a
 * lookup in the task's FPU xsave buffer and keeping that buffer updated
 * in various places.
 */
static __always_inline void x86_pkru_load(struct thread_struct *prev,
					  struct thread_struct *next)
{
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Stash the prev task's value: */
	prev->pkru = rdpkru();

	/*
	 * PKRU writes are slightly expensive. Avoid them when not
	 * strictly necessary:
	 */
	if (prev->pkru != next->pkru)
		wrpkru(next->pkru);
}
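
/*
 * For reference (illustration only): PKRU packs two bits per protection
 * key k, access-disable in bit 2k and write-disable in bit 2k + 1. For
 * example, pkru == 0x55555554 sets AD for keys 1-15 and leaves key 0
 * unrestricted, so the single 32-bit compare above catches a change to
 * any key's permissions.
 */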
static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
					      struct thread_struct *next)
{
	if (static_cpu_has(X86_FEATURE_FSGSBASE)) {
		/* Update the FS and GS selectors if they could have changed. */
		if (unlikely(prev->fsindex || next->fsindex))
			loadseg(FS, next->fsindex);
		if (unlikely(prev->gsindex || next->gsindex))
			loadseg(GS, next->gsindex);

		/* Update the bases. */
		wrfsbase(next->fsbase);
		__wrgsbase_inactive(next->gsbase);
	} else {
		load_seg_legacy(prev->fsindex, prev->fsbase,
				next->fsindex, next->fsbase, FS);
		load_seg_legacy(prev->gsindex, prev->gsbase,
				next->gsindex, next->gsbase, GS);
	}
}
unsigned long x86_fsgsbase_read_task(struct task_struct *task,
				     unsigned short selector)
{
	unsigned short idx = selector >> 3;
	unsigned long base;

	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
		if (unlikely(idx >= GDT_ENTRIES))
			return 0;

		/*
		 * There are no user segments in the GDT with nonzero bases
		 * other than the TLS segments.
		 */
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			return 0;

		idx -= GDT_ENTRY_TLS_MIN;
		base = get_desc_base(&task->thread.tls_array[idx]);
	} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/*
		 * If performance here mattered, we could protect the LDT
		 * with RCU. This is a slow path, though, so we can just
		 * take the mutex.
		 */
		mutex_lock(&task->mm->context.lock);
		ldt = task->mm->context.ldt;
		if (unlikely(!ldt || idx >= ldt->nr_entries))
			base = 0;
		else
			base = get_desc_base(ldt->entries + idx);
		mutex_unlock(&task->mm->context.lock);
#else
		base = 0;
#endif
	}

	return base;
}
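
/*
 * Example of the decoding above (illustration only): the first
 * set_thread_area() TLS slot yields selector (GDT_ENTRY_TLS_MIN << 3) | 3,
 * i.e. 0x63 with GDT_ENTRY_TLS_MIN == 12. Then idx = 0x63 >> 3 = 12, the
 * TI bit is clear, and the base is fetched from tls_array[0]. A selector
 * with the TI bit set, e.g. 0x07, takes the LDT path instead.
 */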
unsigned long x86_gsbase_read_cpu_inactive(void)
{
	unsigned long gsbase;

	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
		unsigned long flags;

		local_irq_save(flags);
		gsbase = __rdgsbase_inactive();
		local_irq_restore(flags);
	} else {
		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
	}

	return gsbase;
}
void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
		unsigned long flags;

		local_irq_save(flags);
		__wrgsbase_inactive(gsbase);
		local_irq_restore(flags);
	} else {
		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
	}
}
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
	unsigned long fsbase;

	if (task == current)
		fsbase = x86_fsbase_read_cpu();
	else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		 (task->thread.fsindex == 0))
		fsbase = task->thread.fsbase;
	else
		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

	return fsbase;
}
unsigned long x86_gsbase_read_task(struct task_struct *task)
{
	unsigned long gsbase;

	if (task == current)
		gsbase = x86_gsbase_read_cpu_inactive();
	else if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		 (task->thread.gsindex == 0))
		gsbase = task->thread.gsbase;
	else
		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

	return gsbase;
}
void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.fsbase = fsbase;
}
void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.gsbase = gsbase;
}
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32)
{
	start_thread_common(regs, new_ip, new_sp,
			    x32 ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(hardirq_stack_inuse));

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_prepare(prev_fpu, cpu);
	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them.
	 */
	arch_end_context_switch(next_p);
	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	x86_fsgsbase_load(prev, next);

	x86_pkru_load(prev, next);
	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	switch_fpu_finish();

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);
	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor. As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths. Instead, we ensure that SS is never NULL in
		 * system call context. We do this by replacing NULL SS
		 * selectors at every context switch. SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt. Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes. Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}
	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_ADDR32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;
	if (current->mm)
		current->mm->context.flags = MM_CONTEXT_HAS_VSYSCALL;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	if (current->mm)
		current->mm->context.flags = 0;

	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_32bit_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status. The x86 mmap() code relies on
	 * the syscall bitness so set the x32 syscall bit right here to make
	 * in_32bit_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}
static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	if (current->mm) {
		/*
		 * uprobes applied to this MM need to know this and
		 * cannot use user_64bit_mode() at that time.
		 */
		current->mm->context.flags = MM_CONTEXT_UPROBE_IA32;
	}

	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}
void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;

	switch (option) {
	case ARCH_SET_GS: {
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * ARCH_SET_GS has always overwritten the index
		 * and the base. Zero is the most sensible value
		 * to put in the index, and is the only value that
		 * makes any sense if FSGSBASE is unavailable.
		 */
		if (task == current) {
			loadseg(GS, 0);
			x86_gsbase_write_cpu_inactive(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.gsbase.
			 */
			task->thread.gsbase = arg2;
		} else {
			task->thread.gsindex = 0;
			x86_gsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_SET_FS: {
		/*
		 * Not strictly needed for %fs, but do it for symmetry
		 * with %gs.
		 */
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * Set the selector to 0 for the same reason
		 * as %gs above.
		 */
		if (task == current) {
			loadseg(FS, 0);
			x86_fsbase_write_cpu(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.fsbase.
			 */
			task->thread.fsbase = arg2;
		} else {
			task->thread.fsindex = 0;
			x86_fsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_GET_FS: {
		unsigned long base = x86_fsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base = x86_gsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}
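
/*
 * Hedged userspace sketch of the interface implemented above (illustration
 * only, assuming the glibc syscall(2) wrapper; ARCH_SET_GS and friends come
 * from <asm/prctl.h>):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base = 0x100000, out;
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, base);	// set GS base, selector -> 0
 *	syscall(SYS_arch_prctl, ARCH_GET_GS, &out);	// out == base afterwards
 */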
#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}