x86/paravirt: Add stack frame dependency to PVOP inline asm calls
Author:     Josh Poimboeuf <jpoimboe@redhat.com>
AuthorDate: Thu, 21 Jan 2016 22:49:12 +0000 (16:49 -0600)
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Wed, 24 Feb 2016 07:35:42 +0000 (08:35 +0100)
If a PVOP call macro is inlined at the beginning of a function, gcc can
insert the call instruction before setting up a stack frame, which
breaks frame pointer convention if CONFIG_FRAME_POINTER is enabled and
can result in a bad stack trace.
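
As a minimal, self-contained illustration (the symbol and function
names below are made up, not the real PVOP machinery): nothing in this
asm statement depends on the stack pointer, so gcc is free to schedule
the call ahead of the frame-pointer prologue when the function is built
with CONFIG_FRAME_POINTER:

  /* Hypothetical stand-in for a patched-in paravirt call target. */
  extern void hypothetical_pv_target(void);

  static inline void pvop_demo_broken(void)
  {
          /*
           * No operand ties this asm to the stack frame, so it can be
           * emitted before "push %rbp; mov %rsp,%rbp".  (The kernel
           * builds with -mno-red-zone, so the call itself is safe.)
           */
          asm volatile("call hypothetical_pv_target"
                       : /* no outputs */
                       :
                       : "rax", "rcx", "rdx", "rsi", "rdi",
                         "r8", "r9", "r10", "r11", "memory", "cc");
  }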

Force a stack frame to be created if CONFIG_FRAME_POINTER is enabled by
listing the stack pointer as an output operand for the PVOP inline asm
statements.
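
A matching sketch of the fix, mirroring what the macros in the diff
below do (same made-up symbol as above): a local register variable
bound to the stack pointer is listed as a "+r" operand, so the asm now
has a read/write dependency on %rsp and gcc sets up the stack frame
before emitting the call when frame pointers are enabled:

  extern void hypothetical_pv_target(void);

  static inline void pvop_demo_fixed(void)
  {
          register void *__sp asm("rsp");   /* "esp" on 32-bit */

          asm volatile("call hypothetical_pv_target"
                       : "+r" (__sp)        /* stack frame dependency */
                       :
                       : "rax", "rcx", "rdx", "rsi", "rdi",
                         "r8", "r9", "r10", "r11", "memory", "cc");
  }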

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Bernd Petrovitsch <bernd@petrovitsch.priv.at>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Chris J Arges <chris.j.arges@canonical.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Marek <mmarek@suse.cz>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Pedro Alves <palves@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: live-patching@vger.kernel.org
Link: http://lkml.kernel.org/r/6a13e48c5a8cf2de1aa112ae2d4c0ac194096282.1453405861.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 77db561..e8c2326 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -466,8 +466,9 @@ int paravirt_disable_iospace(void);
  * makes sure the incoming and outgoing types are always correct.
  */
 #ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS                                \
-       unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
+#define PVOP_VCALL_ARGS                                                        \
+       unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;      \
+       register void *__sp asm("esp")
 #define PVOP_CALL_ARGS                 PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)              "a" ((unsigned long)(x))
@@ -485,9 +486,10 @@ int paravirt_disable_iospace(void);
 #define VEXTRA_CLOBBERS
 #else  /* CONFIG_X86_64 */
 /* [re]ax isn't an arg, but the return val */
-#define PVOP_VCALL_ARGS                                        \
-       unsigned long __edi = __edi, __esi = __esi,     \
-               __edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_VCALL_ARGS                                                \
+       unsigned long __edi = __edi, __esi = __esi,             \
+               __edx = __edx, __ecx = __ecx, __eax = __eax;    \
+       register void *__sp asm("rsp")
 #define PVOP_CALL_ARGS         PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)              "D" ((unsigned long)(x))
@@ -526,7 +528,7 @@ int paravirt_disable_iospace(void);
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
-                                    : call_clbr                        \
+                                    : call_clbr, "+r" (__sp)           \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
@@ -536,7 +538,7 @@ int paravirt_disable_iospace(void);
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
-                                    : call_clbr                        \
+                                    : call_clbr, "+r" (__sp)           \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
@@ -563,7 +565,7 @@ int paravirt_disable_iospace(void);
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
-                            : call_clbr                                \
+                            : call_clbr, "+r" (__sp)                   \
                             : paravirt_type(op),                       \
                               paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \