ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17
author		Marc Zyngier <marc.zyngier@arm.com>	Mon, 15 Oct 2018 15:32:05 +0000 (11:32 -0400)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Thu, 18 Oct 2018 07:16:27 +0000 (09:16 +0200)
Commit 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f upstream.

In order to avoid aliasing attacks against the branch predictor,
let's invalidate the BTB on guest exit. This is made complicated
by the fact that we cannot take a branch before invalidating the
BTB.

We only apply this to A12 and A17, which are the only two ARM
cores on which this is useful.
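
As an aside, the trick used below can be illustrated in plain C. This is
a minimal sketch only, not part of the patch, and encode_slot() /
decode_and_restore() are made-up names: each of the eight vector slots
falls through the "add sp, sp, #1" instructions that follow it, so an
8-byte-aligned SP picks up a value unique to the entry in its low three
bits before any branch is taken, and the entry is decoded again only
after BPIALL.

#include <stdio.h>

/* Slot k falls through (7 - k) "add sp, sp, #1" instructions, so an
 * 8-byte-aligned SP ends up carrying (7 - k) in its low three bits. */
static unsigned long encode_slot(unsigned long sp, unsigned int slot)
{
	return sp + (7 - slot);
}

/* After BPIALL, the entry is recovered from SP alone and SP is restored
 * to its aligned value, mirroring the eor/tst/eorne sequence. */
static unsigned int decode_and_restore(unsigned long *sp)
{
	unsigned int val = *sp & 7;	/* 0 = FIQ, 1 = IRQ, 2 = HVC, ... 7 = reset */

	*sp &= ~7UL;
	return val;
}

int main(void)
{
	unsigned long sp = 0xffff1000UL;	/* 8-byte aligned on entry */

	for (unsigned int slot = 0; slot < 8; slot++) {
		unsigned long entry_sp = encode_slot(sp, slot);

		printf("vector slot %u decodes as %u\n",
		       slot, decode_and_restore(&entry_sp));
	}
	return 0;
}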

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Boot-tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/include/asm/kvm_asm.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/hyp/hyp-entry.S

diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 14d68a4..b598e66 100644
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 8a098e6..85d48c9 100644
@@ -246,7 +246,22 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
 
 static inline void *kvm_get_hyp_vector(void)
 {
-       return kvm_ksym_ref(__kvm_hyp_vector);
+       switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       case ARM_CPU_PART_CORTEX_A12:
+       case ARM_CPU_PART_CORTEX_A17:
+       {
+               extern char __kvm_hyp_vector_bp_inv[];
+               return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+       }
+
+#endif
+       default:
+       {
+               extern char __kvm_hyp_vector[];
+               return kvm_ksym_ref(__kvm_hyp_vector);
+       }
+       }
 }
 
 static inline int kvm_map_vectors(void)
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 95a2fae..e789f52 100644
@@ -71,6 +71,66 @@ __kvm_hyp_vector:
        W(b)    hyp_irq
        W(b)    hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       .align 5
+__kvm_hyp_vector_bp_inv:
+       .global __kvm_hyp_vector_bp_inv
+
+       /*
+        * We encode the exception entry in the bottom 3 bits of
+        * SP, and we have to guarantee to be 8 bytes aligned.
+        */
+       W(add)  sp, sp, #1      /* Reset          7 */
+       W(add)  sp, sp, #1      /* Undef          6 */
+       W(add)  sp, sp, #1      /* Syscall        5 */
+       W(add)  sp, sp, #1      /* Prefetch abort 4 */
+       W(add)  sp, sp, #1      /* Data abort     3 */
+       W(add)  sp, sp, #1      /* HVC            2 */
+       W(add)  sp, sp, #1      /* IRQ            1 */
+       W(nop)                  /* FIQ            0 */
+
+       mcr     p15, 0, r0, c7, c5, 6   /* BPIALL */
+       isb
+
+#ifdef CONFIG_THUMB2_KERNEL
+       /*
+        * Yet another silly hack: Use VPIDR as a temp register.
+        * Thumb2 is really a pain, as SP cannot be used with most
+        * of the bitwise instructions. The vect_br macro ensures
+        * things gets cleaned-up.
+        */
+       mcr     p15, 4, r0, c0, c0, 0   /* VPIDR */
+       mov     r0, sp
+       and     r0, r0, #7
+       sub     sp, sp, r0
+       push    {r1, r2}
+       mov     r1, r0
+       mrc     p15, 4, r0, c0, c0, 0   /* VPIDR */
+       mrc     p15, 0, r2, c0, c0, 0   /* MIDR  */
+       mcr     p15, 4, r2, c0, c0, 0   /* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(   eor     sp, sp, #\val   )
+ARM(   tst     sp, #7          )
+ARM(   eorne   sp, sp, #\val   )
+
+THUMB( cmp     r1, #\val       )
+THUMB( popeq   {r1, r2}        )
+
+       beq     \targ
+.endm
+
+       vect_br 0, hyp_fiq
+       vect_br 1, hyp_irq
+       vect_br 2, hyp_hvc
+       vect_br 3, hyp_dabt
+       vect_br 4, hyp_pabt
+       vect_br 5, hyp_svc
+       vect_br 6, hyp_undef
+       vect_br 7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
        .align
 \label:        mov     r0, #\cause
@@ -149,7 +209,14 @@ hyp_hvc:
        bx      ip
 
 1:
-       push    {lr}
+       /*
+        * Pushing r2 here is just a way of keeping the stack aligned to
+        * 8 bytes on any path that can trigger a HYP exception. Here,
+        * we may well be about to jump into the guest, and the guest
+        * exit would otherwise be badly decoded by our fancy
+        * "decode-exception-without-a-branch" code...
+        */
+       push    {r2, lr}
 
        mov     lr, r0
        mov     r0, r1
@@ -159,7 +226,7 @@ hyp_hvc:
 THUMB( orr     lr, #1)
        blx     lr                      @ Call the HYP function
 
-       pop     {lr}
+       pop     {r2, lr}
        eret
 
 guest_trap:
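
For context (not part of this patch, and only an approximation of the
per-CPU hyp init path in virt/kvm/arm/arm.c of this era; details may
differ in this stable branch): the vector returned by
kvm_get_hyp_vector() is what ends up programmed as the HYP vector base,
so Cortex-A12/A17 hosts pick up the BP-invalidating vectors
automatically.

static void cpu_init_hyp_mode(void *dummy)
{
	phys_addr_t pgd_ptr = kvm_mmu_get_httbr();
	unsigned long stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
	unsigned long hyp_stack_ptr = stack_page + PAGE_SIZE;
	/* __kvm_hyp_vector_bp_inv on A12/A17, __kvm_hyp_vector otherwise */
	unsigned long vector_ptr = (unsigned long)kvm_get_hyp_vector();

	/* HVBAR ends up pointing at the selected vectors */
	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
}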