x86/32: Remove lazy GS macros
author     Brian Gerst <brgerst@gmail.com>
           Fri, 25 Mar 2022 15:39:52 +0000 (11:39 -0400)
committer  Borislav Petkov <bp@suse.de>
           Thu, 14 Apr 2022 12:09:43 +0000 (14:09 +0200)
GS is always a user segment now, so the lazy GS accessor macros are just thin
wrappers around savesegment()/loadsegment() and ->thread.gs. Remove them and
open-code the helpers at their call sites.
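
As a rough sketch of what the conversion amounts to (the helper names below
are invented for illustration only; the real savesegment()/loadsegment()
macros in <asm/segment.h> supply the asm constraints and the fault fixup):

	/* Hypothetical wrappers showing what the removed macros boil down to. */
	static inline unsigned short sketch_read_user_gs(void)
	{
		unsigned short sel;

		savesegment(gs, sel);	/* was get_user_gs() / lazy_save_gs() */
		return sel;
	}

	static inline void sketch_write_user_gs(unsigned short sel)
	{
		loadsegment(gs, sel);	/* was set_user_gs() / lazy_load_gs() */
	}

For a task other than current, the saved selector is read from
task->thread.gs instead, which is what the ptrace path below does.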

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20220325153953.162643-4-brgerst@gmail.com
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/segment.h
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/signal.c
arch/x86/kernel/vm86_32.c
arch/x86/lib/insn-eval.c
arch/x86/math-emu/get_address.c

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 2751604..b8d40dd 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -141,7 +141,7 @@ do {                                                \
 #ifdef CONFIG_X86_32
 #define deactivate_mm(tsk, mm)                 \
 do {                                           \
-       lazy_load_gs(0);                        \
+       loadsegment(gs, 0);                     \
 } while (0)
 #else
 #define deactivate_mm(tsk, mm)                 \
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 656ed65..617b366 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -354,11 +354,6 @@ static inline void __loadsegment_fs(unsigned short value)
  * x86-32 user GS accessors.  This is ugly and could do with some cleaning up.
  */
 #ifdef CONFIG_X86_32
-# define get_user_gs(regs)             (u16)({ unsigned long v; savesegment(gs, v); v; })
-# define set_user_gs(regs, v)          loadsegment(gs, (unsigned long)(v))
-# define task_user_gs(tsk)             ((tsk)->thread.gs)
-# define lazy_save_gs(v)               savesegment(gs, (v))
-# define lazy_load_gs(v)               loadsegment(gs, (v))
 # define load_gs_index(v)              loadsegment(gs, (v))
 #endif /* X86_32 */
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b370767..96a9885 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -160,6 +160,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
        savesegment(ds, p->thread.ds);
 #else
        p->thread.sp0 = (unsigned long) (childregs + 1);
+       savesegment(gs, p->thread.gs);
        /*
         * Clear all status flags including IF and set fixed bit. 64bit
         * does not have this initialization as the frame does not contain
@@ -191,10 +192,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
        if (sp)
                childregs->sp = sp;
 
-#ifdef CONFIG_X86_32
-       task_user_gs(p) = get_user_gs(current_pt_regs());
-#endif
-
        if (unlikely(p->flags & PF_IO_WORKER)) {
                /*
                 * An IO thread is a user space thread, but it doesn't
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 26edb1c..877358f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -63,10 +63,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned short gs;
 
-       if (user_mode(regs))
-               gs = get_user_gs(regs);
-       else
-               savesegment(gs, gs);
+       savesegment(gs, gs);
 
        show_ip(regs, log_lvl);
 
@@ -114,7 +111,7 @@ void release_thread(struct task_struct *dead_task)
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-       set_user_gs(regs, 0);
+       loadsegment(gs, 0);
        regs->fs                = 0;
        regs->ds                = __USER_DS;
        regs->es                = __USER_DS;
@@ -177,7 +174,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
-       lazy_save_gs(prev->gs);
+       savesegment(gs, prev->gs);
 
        /*
         * Load the per-thread Thread-Local Storage descriptor.
@@ -208,7 +205,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
-               lazy_load_gs(next->gs);
+               loadsegment(gs, next->gs);
 
        this_cpu_write(current_task, next_p);
 
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 98d10ef..37c12fb 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -170,9 +170,9 @@ static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
                retval = *pt_regs_access(task_pt_regs(task), offset);
        else {
                if (task == current)
-                       retval = get_user_gs(task_pt_regs(task));
+                       savesegment(gs, retval);
                else
-                       retval = task_user_gs(task);
+                       retval = task->thread.gs;
        }
        return retval;
 }
@@ -210,7 +210,7 @@ static int set_segment_reg(struct task_struct *task,
                break;
 
        case offsetof(struct user_regs_struct, gs):
-               task_user_gs(task) = value;
+               task->thread.gs = value;
        }
 
        return 0;
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index e439eb1..9c7265b 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,7 +93,7 @@ static bool restore_sigcontext(struct pt_regs *regs,
                return false;
 
 #ifdef CONFIG_X86_32
-       set_user_gs(regs, sc.gs);
+       loadsegment(gs, sc.gs);
        regs->fs = sc.fs;
        regs->es = sc.es;
        regs->ds = sc.ds;
@@ -146,8 +146,10 @@ __unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
                     struct pt_regs *regs, unsigned long mask)
 {
 #ifdef CONFIG_X86_32
-       unsafe_put_user(get_user_gs(regs),
-                                 (unsigned int __user *)&sc->gs, Efault);
+       unsigned int gs;
+       savesegment(gs, gs);
+
+       unsafe_put_user(gs,       (unsigned int __user *)&sc->gs, Efault);
        unsafe_put_user(regs->fs, (unsigned int __user *)&sc->fs, Efault);
        unsafe_put_user(regs->es, (unsigned int __user *)&sc->es, Efault);
        unsafe_put_user(regs->ds, (unsigned int __user *)&sc->ds, Efault);
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index c21bcd6..e9e803a 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -151,7 +151,7 @@ exit_vm86:
 
        memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));
 
-       lazy_load_gs(vm86->regs32.gs);
+       loadsegment(gs, vm86->regs32.gs);
 
        regs->pt.ax = retval;
        return;
@@ -325,7 +325,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
  * Save old state
  */
        vm86->saved_sp0 = tsk->thread.sp0;
-       lazy_save_gs(vm86->regs32.gs);
+       savesegment(gs, vm86->regs32.gs);
 
        /* make room for real-mode segments */
        preempt_disable();
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index b781d32..21104c4 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -342,9 +342,9 @@ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff)
  */
 static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
 {
-#ifdef CONFIG_X86_64
        unsigned short sel;
 
+#ifdef CONFIG_X86_64
        switch (seg_reg_idx) {
        case INAT_SEG_REG_IGNORE:
                return 0;
@@ -402,7 +402,8 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
        case INAT_SEG_REG_FS:
                return (unsigned short)(regs->fs & 0xffff);
        case INAT_SEG_REG_GS:
-               return get_user_gs(regs);
+               savesegment(gs, sel);
+               return sel;
        case INAT_SEG_REG_IGNORE:
        default:
                return -EINVAL;
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index b82ca14..4a9fd90 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -153,7 +153,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
        switch (segment) {
        case PREFIX_GS_ - 1:
                /* user gs handling can be lazy, use special accessors */
-               addr->selector = get_user_gs(FPU_info->regs);
+               savesegment(gs, addr->selector);
                break;
        default:
                addr->selector = PM_REG_(segment);