s390: always save and restore all registers on context switch
author    Heiko Carstens <heiko.carstens@de.ibm.com>
          Mon, 20 Nov 2017 11:38:44 +0000 (12:38 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 14 Dec 2017 08:52:56 +0000 (09:52 +0100)
commit fbbd7f1a51965b50dd12924841da0d478f3da71b upstream.

The switch_to() macro has an optimization to avoid saving and
restoring register contents that aren't needed for kernel threads.
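
For illustration only (this is not code from the patch): a kernel thread
is recognized by the fact that it owns no user address space, i.e. its
->mm pointer is NULL, which is the test the pre-patch guards below
("if (prev->mm)") relied on. A minimal, hypothetical sketch:

#include <linux/sched.h>

/* Hypothetical helper, not part of the patch: kernel threads never own
 * a user address space, so their ->mm pointer is NULL.  This is the same
 * test the old switch_to() guards used to skip the register save/restore.
 */
static inline bool task_is_kernel_thread(const struct task_struct *tsk)
{
	return tsk->mm == NULL;
}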

There is, however, the possibility that a kernel thread execve's a user
space program. In such a case the execve'd process can partially see the
register contents of the previous process, which shouldn't be allowed.
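
One plausible path for this, sketched here as a hypothetical example
rather than code taken from the patch, is the usermode helper
infrastructure: a kernel worker thread is forked and then execve's the
requested user space binary.

#include <linux/kmod.h>

/* Hypothetical illustration: call_usermodehelper() hands the request to
 * a kernel worker thread, which then execve's the given path.  Function
 * name, binary path and environment are made up for this example.
 */
static int run_example_helper(void)
{
	char *argv[] = { "/sbin/hotplug", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };

	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}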

To avoid this, simply always save and restore register contents on
context switch.

Fixes: fdb6d070effba ("switch_to: dont restore/save access & fpu regs for kernel threads")
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/s390/include/asm/switch_to.h

index ec7b476..c61b2cc 100644
@@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs)
        asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
 }
 
-#define switch_to(prev,next,last) do {                                 \
-       if (prev->mm) {                                                 \
-               save_fpu_regs();                                        \
-               save_access_regs(&prev->thread.acrs[0]);                \
-               save_ri_cb(prev->thread.ri_cb);                         \
-               save_gs_cb(prev->thread.gs_cb);                         \
-       }                                                               \
+#define switch_to(prev, next, last) do {                               \
+       /* save_fpu_regs() sets the CIF_FPU flag, which enforces        \
+        * a restore of the floating point / vector registers as        \
+        * soon as the next task returns to user space                  \
+        */                                                             \
+       save_fpu_regs();                                                \
+       save_access_regs(&prev->thread.acrs[0]);                        \
+       save_ri_cb(prev->thread.ri_cb);                                 \
+       save_gs_cb(prev->thread.gs_cb);                                 \
        update_cr_regs(next);                                           \
-       if (next->mm) {                                                 \
-               set_cpu_flag(CIF_FPU);                                  \
-               restore_access_regs(&next->thread.acrs[0]);             \
-               restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);  \
-               restore_gs_cb(next->thread.gs_cb);                      \
-       }                                                               \
-       prev = __switch_to(prev,next);                                  \
+       restore_access_regs(&next->thread.acrs[0]);                     \
+       restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);          \
+       restore_gs_cb(next->thread.gs_cb);                              \
+       prev = __switch_to(prev, next);                                 \
 } while (0)
 
 #endif /* __ASM_SWITCH_TO_H */
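
As a rough illustration of what "partially see the contents of the
previous process" can mean, the sketch below is a hypothetical user
space probe, not part of this patch: on s390 the access registers can be
read from problem state, so a binary execve'd by a kernel thread could,
before this fix, find stale values left behind by the previously
scheduled task.

#include <stdio.h>

int main(void)
{
	struct { unsigned int ar[16]; } regs;
	int i;

	/* stam: STore Access Multiple - dump access registers 0-15 */
	asm volatile("stam 0,15,%0" : "=Q" (regs));
	for (i = 0; i < 16; i++)
		printf("ar%d = %08x\n", i, regs.ar[i]);
	return 0;
}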