ARM: Spectre-BHB workaround
author     Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Thu, 10 Feb 2022 16:05:45 +0000 (16:05 +0000)
committer  Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Sat, 5 Mar 2022 10:42:07 +0000 (10:42 +0000)
Workaround the Spectre BHB issues for Cortex-A15, Cortex-A57,
Cortex-A72, Cortex-A73 and Cortex-A75. To be safe, we also include
Brahma B15, which is affected by Spectre V2 in the same way as
Cortex-A15.
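
For reference, the "loop8" mitigation boils down to running a short loop
of taken branches on exception entry to overwrite the branch history,
followed by a dsb/isb pair, while the alternative used on Cortex-A73/A75
invalidates the branch predictor instead. A simplified sketch of what the
new vector stubs below do (register choice and labels are illustrative
only):

	@ History-overwrite variant (Cortex-A15/A57/A72, Brahma B15):
	mov	r0, #8			@ eight iterations of...
1:	b	. + 4			@ ...taken branches to overwrite
	subs	r0, r0, #1		@ the branch history
	bne	1b
	dsb				@ then synchronise before handling
	isb				@ the exception proper

	@ Predictor-invalidate variant (Cortex-A73/A75):
	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL

The actual implementations are in the entry-armv.S and entry-common.S
changes below.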

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
arch/arm/include/asm/assembler.h
arch/arm/include/asm/spectre.h
arch/arm/include/asm/vmlinux.lds.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/spectre.c
arch/arm/kernel/traps.c
arch/arm/mm/Kconfig
arch/arm/mm/proc-v7-bugs.c

diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 7d23d4b..49ea603 100644
        .endm
 #endif
 
+#if __LINUX_ARM_ARCH__ < 7
+       .macro  dsb, args
+       mcr     p15, 0, r0, c7, c10, 4
+       .endm
+
+       .macro  isb, args
+       mcr     p15, 0, r0, c7, c5, 4
+       .endm
+#endif
+
        .macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
        .if \save
diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
index 8a9019e..d1fa560 100644
@@ -14,6 +14,7 @@ enum {
        __SPECTRE_V2_METHOD_ICIALLU,
        __SPECTRE_V2_METHOD_SMC,
        __SPECTRE_V2_METHOD_HVC,
+       __SPECTRE_V2_METHOD_LOOP8,
 };
 
 enum {
@@ -21,8 +22,11 @@ enum {
        SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
        SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
        SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
+       SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
 };
 
 void spectre_v2_update_state(unsigned int state, unsigned int methods);
 
+int spectre_bhb_update_vectors(unsigned int method);
+
 #endif
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
index e02710d..0ef21bf 100644
  */
 #define ARM_VECTORS                                                    \
        __vectors_lma = .;                                              \
-       .vectors 0xffff0000 : AT(__vectors_lma) {                       \
-               *(.vectors)                                             \
+       OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {            \
+               .vectors {                                              \
+                       *(.vectors)                                     \
+               }                                                       \
+               .vectors.bhb.loop8 {                                    \
+                       *(.vectors.bhb.loop8)                           \
+               }                                                       \
+               .vectors.bhb.bpiall {                                   \
+                       *(.vectors.bhb.bpiall)                          \
+               }                                                       \
        }                                                               \
        ARM_LMA(__vectors, .vectors);                                   \
-       . = __vectors_lma + SIZEOF(.vectors);                           \
+       ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);               \
+       ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);             \
+       . = __vectors_lma + SIZEOF(.vectors) +                          \
+               SIZEOF(.vectors.bhb.loop8) +                            \
+               SIZEOF(.vectors.bhb.bpiall);                            \
                                                                        \
        __stubs_lma = .;                                                \
        .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {              \
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 5cd0578..676703c 100644
@@ -1002,12 +1002,11 @@ vector_\name:
        sub     lr, lr, #\correction
        .endif
 
-       @
-       @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
-       @ (parent CPSR)
-       @
+       @ Save r0, lr_<exception> (parent PC)
        stmia   sp, {r0, lr}            @ save r0, lr
-       mrs     lr, spsr
+
+       @ Save spsr_<exception> (parent CPSR)
+2:     mrs     lr, spsr
        str     lr, [sp, #8]            @ save spsr
 
        @
@@ -1028,6 +1027,44 @@ vector_\name:
        movs    pc, lr                  @ branch to handler in SVC mode
 ENDPROC(vector_\name)
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+       .subsection 1
+       .align 5
+vector_bhb_loop8_\name:
+       .if \correction
+       sub     lr, lr, #\correction
+       .endif
+
+       @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}
+
+       @ bhb workaround
+       mov     r0, #8
+1:     b       . + 4
+       subs    r0, r0, #1
+       bne     1b
+       dsb
+       isb
+       b       2b
+ENDPROC(vector_bhb_loop8_\name)
+
+vector_bhb_bpiall_\name:
+       .if \correction
+       sub     lr, lr, #\correction
+       .endif
+
+       @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}
+
+       @ bhb workaround
+       mcr     p15, 0, r0, c7, c5, 6   @ BPIALL
+       @ isb not needed due to "movs pc, lr" in the vector stub
+       @ which gives a "context synchronisation".
+       b       2b
+ENDPROC(vector_bhb_bpiall_\name)
+       .previous
+#endif
+
        .align  2
        @ handler addresses follow this label
 1:
@@ -1036,6 +1073,10 @@ ENDPROC(vector_\name)
        .section .stubs, "ax", %progbits
        @ This must be the first word
        .word   vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+       .word   vector_bhb_loop8_swi
+       .word   vector_bhb_bpiall_swi
+#endif
 
 vector_rst:
  ARM(  swi     SYS_ERROR0      )
@@ -1150,8 +1191,10 @@ vector_addrexcptn:
  * FIQ "NMI" handler
  *-----------------------------------------------------------------------------
  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so let's place it in its own
+ * subsection.
  */
+       .subsection 2
        vector_stub     fiq, FIQ_MODE, 4
 
        .long   __fiq_usr                       @  0  (USR_26 / USR_32)
@@ -1184,6 +1227,30 @@ vector_addrexcptn:
        W(b)    vector_irq
        W(b)    vector_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+       .section .vectors.bhb.loop8, "ax", %progbits
+.L__vectors_bhb_loop8_start:
+       W(b)    vector_rst
+       W(b)    vector_bhb_loop8_und
+       W(ldr)  pc, .L__vectors_bhb_loop8_start + 0x1004
+       W(b)    vector_bhb_loop8_pabt
+       W(b)    vector_bhb_loop8_dabt
+       W(b)    vector_addrexcptn
+       W(b)    vector_bhb_loop8_irq
+       W(b)    vector_bhb_loop8_fiq
+
+       .section .vectors.bhb.bpiall, "ax", %progbits
+.L__vectors_bhb_bpiall_start:
+       W(b)    vector_rst
+       W(b)    vector_bhb_bpiall_und
+       W(ldr)  pc, .L__vectors_bhb_bpiall_start + 0x1008
+       W(b)    vector_bhb_bpiall_pabt
+       W(b)    vector_bhb_bpiall_dabt
+       W(b)    vector_addrexcptn
+       W(b)    vector_bhb_bpiall_irq
+       W(b)    vector_bhb_bpiall_fiq
+#endif
+
        .data
        .align  2
 
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index ac86c34..dbc1913 100644
@@ -154,12 +154,36 @@ ENDPROC(ret_from_fork)
  */
 
        .align  5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ENTRY(vector_bhb_loop8_swi)
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}
+       mov     r8, #8
+1:     b       2f
+2:     subs    r8, r8, #1
+       bne     1b
+       dsb
+       isb
+       b       3f
+ENDPROC(vector_bhb_loop8_swi)
+
+       .align  5
+ENTRY(vector_bhb_bpiall_swi)
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}
+       mcr     p15, 0, r8, c7, c5, 6   @ BPIALL
+       isb
+       b       3f
+ENDPROC(vector_bhb_bpiall_swi)
+#endif
+       .align  5
 ENTRY(vector_swi)
 #ifdef CONFIG_CPU_V7M
        v7m_exception_entry
 #else
        sub     sp, sp, #PT_REGS_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
+3:
  ARM(  add     r8, sp, #S_PC           )
  ARM(  stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
  THUMB(        mov     r8, sp                  )
diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
index 6f6dd1c..ade967f 100644
@@ -45,6 +45,10 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                method = "Firmware call";
                break;
 
+       case SPECTRE_V2_METHOD_LOOP8:
+               method = "History overwrite";
+               break;
+
        default:
                method = "Multiple mitigations";
                break;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index abc82df..90c887a 100644
@@ -30,6 +30,7 @@
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/exception.h>
+#include <asm/spectre.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
 #include <asm/ptrace.h>
@@ -801,6 +802,43 @@ static void flush_vectors(void *vma, size_t offset, size_t size)
        flush_icache_range(start, end);
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+int spectre_bhb_update_vectors(unsigned int method)
+{
+       extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
+       extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
+       void *vec_start, *vec_end;
+
+       if (system_state >= SYSTEM_FREEING_INITMEM) {
+               pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
+                      smp_processor_id());
+               return SPECTRE_VULNERABLE;
+       }
+
+       switch (method) {
+       case SPECTRE_V2_METHOD_LOOP8:
+               vec_start = __vectors_bhb_loop8_start;
+               vec_end = __vectors_bhb_loop8_end;
+               break;
+
+       case SPECTRE_V2_METHOD_BPIALL:
+               vec_start = __vectors_bhb_bpiall_start;
+               vec_end = __vectors_bhb_bpiall_end;
+               break;
+
+       default:
+               pr_err("CPU%u: unknown Spectre BHB state %d\n",
+                      smp_processor_id(), method);
+               return SPECTRE_VULNERABLE;
+       }
+
+       copy_from_lma(vectors_page, vec_start, vec_end);
+       flush_vectors(vectors_page, 0, vec_end - vec_start);
+
+       return SPECTRE_MITIGATED;
+}
+#endif
+
 void __init early_trap_init(void *vectors_base)
 {
        extern char __stubs_start[], __stubs_end[];
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 850329e..9724c16 100644
@@ -851,6 +851,16 @@ config HARDEN_BRANCH_PREDICTOR
 
           If unsure, say Y.
 
+config HARDEN_BRANCH_HISTORY
+       bool "Harden Spectre style attacks against branch history" if EXPERT
+       depends on CPU_SPECTRE
+       default y
+       help
+         Speculation attacks against some high-performance processors can
+         make use of branch history to influence future speculation. When
+         taking an exception, a sequence of branches overwrites the branch
+         history, or branch history is invalidated.
+
 config TLS_REG_EMUL
        bool
        select NEED_KUSER_HELPERS
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index e438e59..c226fea 100644
@@ -177,6 +177,81 @@ static void cpu_v7_spectre_v2_init(void)
        spectre_v2_update_state(state, method);
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+static int spectre_bhb_method;
+
+static const char *spectre_bhb_method_name(int method)
+{
+       switch (method) {
+       case SPECTRE_V2_METHOD_LOOP8:
+               return "loop";
+
+       case SPECTRE_V2_METHOD_BPIALL:
+               return "BPIALL";
+
+       default:
+               return "unknown";
+       }
+}
+
+static int spectre_bhb_install_workaround(int method)
+{
+       if (spectre_bhb_method != method) {
+               if (spectre_bhb_method) {
+                       pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
+                              smp_processor_id());
+
+                       return SPECTRE_VULNERABLE;
+               }
+
+               if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
+                       return SPECTRE_VULNERABLE;
+
+               spectre_bhb_method = method;
+       }
+
+       pr_info("CPU%u: Spectre BHB: using %s workaround\n",
+               smp_processor_id(), spectre_bhb_method_name(method));
+
+       return SPECTRE_MITIGATED;
+}
+#else
+static int spectre_bhb_install_workaround(int method)
+{
+       return SPECTRE_VULNERABLE;
+}
+#endif
+
+static void cpu_v7_spectre_bhb_init(void)
+{
+       unsigned int state, method = 0;
+
+       switch (read_cpuid_part()) {
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
+       case ARM_CPU_PART_CORTEX_A57:
+       case ARM_CPU_PART_CORTEX_A72:
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_LOOP8;
+               break;
+
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
+               state = SPECTRE_MITIGATED;
+               method = SPECTRE_V2_METHOD_BPIALL;
+               break;
+
+       default:
+               state = SPECTRE_UNAFFECTED;
+               break;
+       }
+
+       if (state == SPECTRE_MITIGATED)
+               state = spectre_bhb_install_workaround(method);
+
+       spectre_v2_update_state(state, method);
+}
+
 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
                                                  u32 mask, const char *msg)
 {
@@ -217,4 +292,5 @@ void cpu_v7_ca15_ibe(void)
 void cpu_v7_bugs_init(void)
 {
        cpu_v7_spectre_v2_init();
+       cpu_v7_spectre_bhb_init();
 }